qemu/linux-user/syscall.c
   1/*
   2 *  Linux syscalls
   3 *
   4 *  Copyright (c) 2003 Fabrice Bellard
   5 *
   6 *  This program is free software; you can redistribute it and/or modify
   7 *  it under the terms of the GNU General Public License as published by
   8 *  the Free Software Foundation; either version 2 of the License, or
   9 *  (at your option) any later version.
  10 *
  11 *  This program is distributed in the hope that it will be useful,
  12 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 *  GNU General Public License for more details.
  15 *
  16 *  You should have received a copy of the GNU General Public License
  17 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19#define _ATFILE_SOURCE
  20#include "qemu/osdep.h"
  21#include "qemu/cutils.h"
  22#include "qemu/path.h"
  23#include "qemu/memfd.h"
  24#include "qemu/queue.h"
  25#include <elf.h>
  26#include <endian.h>
  27#include <grp.h>
  28#include <sys/ipc.h>
  29#include <sys/msg.h>
  30#include <sys/wait.h>
  31#include <sys/mount.h>
  32#include <sys/file.h>
  33#include <sys/fsuid.h>
  34#include <sys/personality.h>
  35#include <sys/prctl.h>
  36#include <sys/resource.h>
  37#include <sys/swap.h>
  38#include <linux/capability.h>
  39#include <sched.h>
  40#include <sys/timex.h>
  41#include <sys/socket.h>
  42#include <linux/sockios.h>
  43#include <sys/un.h>
  44#include <sys/uio.h>
  45#include <poll.h>
  46#include <sys/times.h>
  47#include <sys/shm.h>
  48#include <sys/sem.h>
  49#include <sys/statfs.h>
  50#include <utime.h>
  51#include <sys/sysinfo.h>
  52#include <sys/signalfd.h>
  53//#include <sys/user.h>
  54#include <netinet/in.h>
  55#include <netinet/ip.h>
  56#include <netinet/tcp.h>
  57#include <netinet/udp.h>
  58#include <linux/wireless.h>
  59#include <linux/icmp.h>
  60#include <linux/icmpv6.h>
  61#include <linux/if_tun.h>
  62#include <linux/in6.h>
  63#include <linux/errqueue.h>
  64#include <linux/random.h>
  65#ifdef CONFIG_TIMERFD
  66#include <sys/timerfd.h>
  67#endif
  68#ifdef CONFIG_EVENTFD
  69#include <sys/eventfd.h>
  70#endif
  71#ifdef CONFIG_EPOLL
  72#include <sys/epoll.h>
  73#endif
  74#ifdef CONFIG_ATTR
  75#include "qemu/xattr.h"
  76#endif
  77#ifdef CONFIG_SENDFILE
  78#include <sys/sendfile.h>
  79#endif
  80#ifdef HAVE_SYS_KCOV_H
  81#include <sys/kcov.h>
  82#endif
  83
  84#define termios host_termios
  85#define winsize host_winsize
  86#define termio host_termio
  87#define sgttyb host_sgttyb /* same as target */
  88#define tchars host_tchars /* same as target */
  89#define ltchars host_ltchars /* same as target */
  90
  91#include <linux/termios.h>
  92#include <linux/unistd.h>
  93#include <linux/cdrom.h>
  94#include <linux/hdreg.h>
  95#include <linux/soundcard.h>
  96#include <linux/kd.h>
  97#include <linux/mtio.h>
  98#include <linux/fs.h>
  99#include <linux/fd.h>
 100#if defined(CONFIG_FIEMAP)
 101#include <linux/fiemap.h>
 102#endif
 103#include <linux/fb.h>
 104#if defined(CONFIG_USBFS)
 105#include <linux/usbdevice_fs.h>
 106#include <linux/usb/ch9.h>
 107#endif
 108#include <linux/vt.h>
 109#include <linux/dm-ioctl.h>
 110#include <linux/reboot.h>
 111#include <linux/route.h>
 112#include <linux/filter.h>
 113#include <linux/blkpg.h>
 114#include <netpacket/packet.h>
 115#include <linux/netlink.h>
 116#include <linux/if_alg.h>
 117#include <linux/rtc.h>
 118#include <sound/asound.h>
 119#ifdef HAVE_BTRFS_H
 120#include <linux/btrfs.h>
 121#endif
 122#ifdef HAVE_DRM_H
 123#include <libdrm/drm.h>
 124#include <libdrm/i915_drm.h>
 125#endif
 126#include "linux_loop.h"
 127#include "uname.h"
 128
 129#include "qemu.h"
 130#include "qemu/guest-random.h"
 131#include "qemu/selfmap.h"
 132#include "user/syscall-trace.h"
 133#include "qapi/error.h"
 134#include "fd-trans.h"
 135#include "tcg/tcg.h"
 136
 137#ifndef CLONE_IO
 138#define CLONE_IO                0x80000000      /* Clone io context */
 139#endif
 140
 141/* We can't directly call the host clone syscall, because this will
 142 * badly confuse libc (breaking mutexes, for example). So we must
 143 * divide clone flags into:
 144 *  * flag combinations that look like pthread_create()
 145 *  * flag combinations that look like fork()
 146 *  * flags we can implement within QEMU itself
 147 *  * flags we can't support and will return an error for
 148 */
 149/* For thread creation, all these flags must be present; for
 150 * fork, none must be present.
 151 */
 152#define CLONE_THREAD_FLAGS                              \
 153    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
 154     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
 155
 156/* These flags are ignored:
 157 * CLONE_DETACHED is now ignored by the kernel;
 158 * CLONE_IO is just an optimisation hint to the I/O scheduler
 159 */
 160#define CLONE_IGNORED_FLAGS                     \
 161    (CLONE_DETACHED | CLONE_IO)
 162
 163/* Flags for fork which we can implement within QEMU itself */
 164#define CLONE_OPTIONAL_FORK_FLAGS               \
 165    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
 166     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
 167
 168/* Flags for thread creation which we can implement within QEMU itself */
 169#define CLONE_OPTIONAL_THREAD_FLAGS                             \
 170    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
 171     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
 172
 173#define CLONE_INVALID_FORK_FLAGS                                        \
 174    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
 175
 176#define CLONE_INVALID_THREAD_FLAGS                                      \
 177    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
 178       CLONE_IGNORED_FLAGS))
 179
 180/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 181 * have almost all been allocated. We cannot support any of
 182 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 183 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 184 * The checks against the invalid thread masks above will catch these.
 185 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 186 */
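/*
 * Illustrative example (flag values are roughly what a typical glibc uses):
 * pthread_create() issues clone() with approximately
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * which satisfies CLONE_THREAD_FLAGS and sets no bit in
 * CLONE_INVALID_THREAD_FLAGS, so it is handled as thread creation.  A plain
 * fork() passes only an exit signal in the CSIGNAL bits, which matches the
 * fork path.
 */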
 187
 188/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 189 * once. This exercises the codepaths for restart.
 190 */
 191//#define DEBUG_ERESTARTSYS
 192
 193//#include <linux/msdos_fs.h>
 194#define VFAT_IOCTL_READDIR_BOTH         _IOR('r', 1, struct linux_dirent [2])
 195#define VFAT_IOCTL_READDIR_SHORT        _IOR('r', 2, struct linux_dirent [2])
 196
 197#undef _syscall0
 198#undef _syscall1
 199#undef _syscall2
 200#undef _syscall3
 201#undef _syscall4
 202#undef _syscall5
 203#undef _syscall6
 204
 205#define _syscall0(type,name)            \
 206static type name (void)                 \
 207{                                       \
 208        return syscall(__NR_##name);    \
 209}
 210
 211#define _syscall1(type,name,type1,arg1)         \
 212static type name (type1 arg1)                   \
 213{                                               \
 214        return syscall(__NR_##name, arg1);      \
 215}
 216
 217#define _syscall2(type,name,type1,arg1,type2,arg2)      \
 218static type name (type1 arg1,type2 arg2)                \
 219{                                                       \
 220        return syscall(__NR_##name, arg1, arg2);        \
 221}
 222
 223#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
 224static type name (type1 arg1,type2 arg2,type3 arg3)             \
 225{                                                               \
 226        return syscall(__NR_##name, arg1, arg2, arg3);          \
 227}
 228
 229#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)        \
 230static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)                  \
 231{                                                                               \
 232        return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
 233}
 234
 235#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,        \
 236                  type5,arg5)                                                   \
 237static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)       \
 238{                                                                               \
 239        return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
 240}
 241
 242
 243#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,        \
 244                  type5,arg5,type6,arg6)                                        \
 245static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,       \
 246                  type6 arg6)                                                   \
 247{                                                                               \
 248        return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
 249}
 250
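/*
 * As an illustration of the macros above, the _syscall2(int, capget, ...)
 * declaration further down expands (roughly) to:
 *
 *   static int capget(struct __user_cap_header_struct *header,
 *                     struct __user_cap_data_struct *data)
 *   {
 *       return syscall(__NR_capget, header, data);
 *   }
 *
 * i.e. a thin static wrapper that invokes the raw host syscall, bypassing
 * the libc wrapper of the same name, if any.
 */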
 251
 252#define __NR_sys_uname __NR_uname
 253#define __NR_sys_getcwd1 __NR_getcwd
 254#define __NR_sys_getdents __NR_getdents
 255#define __NR_sys_getdents64 __NR_getdents64
 256#define __NR_sys_getpriority __NR_getpriority
 257#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
 258#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
 259#define __NR_sys_syslog __NR_syslog
 260#if defined(__NR_futex)
 261# define __NR_sys_futex __NR_futex
 262#endif
 263#if defined(__NR_futex_time64)
 264# define __NR_sys_futex_time64 __NR_futex_time64
 265#endif
 266#define __NR_sys_inotify_init __NR_inotify_init
 267#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
 268#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
 269#define __NR_sys_statx __NR_statx
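/*
 * The aliases above exist because the wrappers are declared below via
 * _syscallN(type, sys_xxx, ...), which token-pastes __NR_sys_xxx.  The sys_
 * prefix keeps the wrappers from clashing with libc declarations of the same
 * name, whose semantics can differ (libc getpriority(), for example, biases
 * the raw syscall's return value).
 */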
 270
 271#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
 272#define __NR__llseek __NR_lseek
 273#endif
 274
 275/* Newer kernel ports have llseek() instead of _llseek() */
 276#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
 277#define TARGET_NR__llseek TARGET_NR_llseek
 278#endif
 279
 280/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
 281#ifndef TARGET_O_NONBLOCK_MASK
 282#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
 283#endif
 284
 285#define __NR_sys_gettid __NR_gettid
 286_syscall0(int, sys_gettid)
 287
 288/* For the 64-bit guest on 32-bit host case we must emulate
 289 * getdents using getdents64, because otherwise the host
 290 * might hand us back more dirent records than we can fit
 291 * into the guest buffer after structure format conversion.
  292 * Otherwise we implement the guest getdents on top of the host getdents.
 293 */
 294#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
 295#define EMULATE_GETDENTS_WITH_GETDENTS
 296#endif
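/*
 * Why the 64-bit-guest-on-32-bit-host case is special: a 32-bit host's
 * struct linux_dirent carries 32-bit d_ino/d_off fields, while the 64-bit
 * guest's records need abi_long (64-bit) fields, so each record grows during
 * conversion and a full host buffer may no longer fit in the guest buffer.
 * Going through host getdents64 avoids this, since its records are not
 * smaller than the converted guest records.
 */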
 297
 298#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
 299_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
 300#endif
 301#if (defined(TARGET_NR_getdents) && \
 302      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
 303    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
 304_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
 305#endif
 306#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
 307_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
 308          loff_t *, res, uint, wh);
 309#endif
 310_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
 311_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
 312          siginfo_t *, uinfo)
 313_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
 314#ifdef __NR_exit_group
 315_syscall1(int,exit_group,int,error_code)
 316#endif
 317#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
 318_syscall1(int,set_tid_address,int *,tidptr)
 319#endif
 320#if defined(__NR_futex)
 321_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
 322          const struct timespec *,timeout,int *,uaddr2,int,val3)
 323#endif
 324#if defined(__NR_futex_time64)
 325_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
 326          const struct timespec *,timeout,int *,uaddr2,int,val3)
 327#endif
 328#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
 329_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
 330          unsigned long *, user_mask_ptr);
 331#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
 332_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
 333          unsigned long *, user_mask_ptr);
 334#define __NR_sys_getcpu __NR_getcpu
 335_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
 336_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
 337          void *, arg);
 338_syscall2(int, capget, struct __user_cap_header_struct *, header,
 339          struct __user_cap_data_struct *, data);
 340_syscall2(int, capset, struct __user_cap_header_struct *, header,
 341          struct __user_cap_data_struct *, data);
 342#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
 343_syscall2(int, ioprio_get, int, which, int, who)
 344#endif
 345#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
 346_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
 347#endif
 348#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
 349_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
 350#endif
 351
 352#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
 353_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
 354          unsigned long, idx1, unsigned long, idx2)
 355#endif
 356
 357/*
 358 * It is assumed that struct statx is architecture independent.
 359 */
 360#if defined(TARGET_NR_statx) && defined(__NR_statx)
 361_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
 362          unsigned int, mask, struct target_statx *, statxbuf)
 363#endif
 364#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
 365_syscall2(int, membarrier, int, cmd, int, flags)
 366#endif
 367
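/*
 * Each entry below is { target_mask, target_bits, host_mask, host_bits }:
 * target_to_host_bitmask() (and host_to_target_bitmask() in the other
 * direction) checks, for every entry, whether (flags & mask) == bits and, if
 * so, ORs in the paired bits.  For example the
 * { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY } entry turns a
 * guest O_WRONLY access mode into the host O_WRONLY.
 */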
 368static bitmask_transtbl fcntl_flags_tbl[] = {
 369  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
 370  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
 371  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
 372  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
 373  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
 374  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
 375  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
 376  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
 377  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
 378  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
 379  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
 380  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
 381  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
 382#if defined(O_DIRECT)
 383  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
 384#endif
 385#if defined(O_NOATIME)
 386  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
 387#endif
 388#if defined(O_CLOEXEC)
 389  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
 390#endif
 391#if defined(O_PATH)
 392  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
 393#endif
 394#if defined(O_TMPFILE)
 395  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
 396#endif
 397  /* Don't terminate the list prematurely on 64-bit host+guest.  */
 398#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
 399  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
 400#endif
 401  { 0, 0, 0, 0 }
 402};
 403
 404_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 405
 406#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
 407#if defined(__NR_utimensat)
 408#define __NR_sys_utimensat __NR_utimensat
 409_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
 410          const struct timespec *,tsp,int,flags)
 411#else
 412static int sys_utimensat(int dirfd, const char *pathname,
 413                         const struct timespec times[2], int flags)
 414{
 415    errno = ENOSYS;
 416    return -1;
 417}
 418#endif
 419#endif /* TARGET_NR_utimensat */
 420
 421#ifdef TARGET_NR_renameat2
 422#if defined(__NR_renameat2)
 423#define __NR_sys_renameat2 __NR_renameat2
 424_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
 425          const char *, new, unsigned int, flags)
 426#else
 427static int sys_renameat2(int oldfd, const char *old,
 428                         int newfd, const char *new, int flags)
 429{
 430    if (flags == 0) {
 431        return renameat(oldfd, old, newfd, new);
 432    }
 433    errno = ENOSYS;
 434    return -1;
 435}
 436#endif
 437#endif /* TARGET_NR_renameat2 */
 438
 439#ifdef CONFIG_INOTIFY
 440#include <sys/inotify.h>
 441
 442#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
 443static int sys_inotify_init(void)
 444{
 445  return (inotify_init());
 446}
 447#endif
 448#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
 449static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
 450{
 451  return (inotify_add_watch(fd, pathname, mask));
 452}
 453#endif
 454#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
 455static int sys_inotify_rm_watch(int fd, int32_t wd)
 456{
 457  return (inotify_rm_watch(fd, wd));
 458}
 459#endif
 460#ifdef CONFIG_INOTIFY1
 461#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
 462static int sys_inotify_init1(int flags)
 463{
 464  return (inotify_init1(flags));
 465}
 466#endif
 467#endif
 468#else
 469/* Userspace can usually survive runtime without inotify */
 470#undef TARGET_NR_inotify_init
 471#undef TARGET_NR_inotify_init1
 472#undef TARGET_NR_inotify_add_watch
 473#undef TARGET_NR_inotify_rm_watch
 474#endif /* CONFIG_INOTIFY  */
 475
 476#if defined(TARGET_NR_prlimit64)
 477#ifndef __NR_prlimit64
 478# define __NR_prlimit64 -1
 479#endif
 480#define __NR_sys_prlimit64 __NR_prlimit64
  481/* The glibc rlimit structure may not match the one used by the underlying syscall */
 482struct host_rlimit64 {
 483    uint64_t rlim_cur;
 484    uint64_t rlim_max;
 485};
 486_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
 487          const struct host_rlimit64 *, new_limit,
 488          struct host_rlimit64 *, old_limit)
 489#endif
 490
 491
 492#if defined(TARGET_NR_timer_create)
 493/* Maximum of 32 active POSIX timers allowed at any one time. */
 494static timer_t g_posix_timers[32] = { 0, } ;
 495
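/*
 * A slot containing 0 is free.  next_free_host_timer() claims a slot by
 * writing the placeholder value (timer_t) 1 into it; the TARGET_NR_timer_create
 * handling later in this file replaces the placeholder with the real host
 * timer id, and timer deletion resets the slot to 0.
 */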
 496static inline int next_free_host_timer(void)
 497{
 498    int k ;
 499    /* FIXME: Does finding the next free slot require a lock? */
 500    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
 501        if (g_posix_timers[k] == 0) {
 502            g_posix_timers[k] = (timer_t) 1;
 503            return k;
 504        }
 505    }
 506    return -1;
 507}
 508#endif
 509
 510#define ERRNO_TABLE_SIZE 1200
 511
 512/* target_to_host_errno_table[] is initialized from
 513 * host_to_target_errno_table[] in syscall_init(). */
 514static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
 515};
 516
 517/*
 518 * This list is the union of errno values overridden in asm-<arch>/errno.h
 519 * minus the errnos that are not actually generic to all archs.
 520 */
 521static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
 522    [EAGAIN]            = TARGET_EAGAIN,
 523    [EIDRM]             = TARGET_EIDRM,
 524    [ECHRNG]            = TARGET_ECHRNG,
 525    [EL2NSYNC]          = TARGET_EL2NSYNC,
 526    [EL3HLT]            = TARGET_EL3HLT,
 527    [EL3RST]            = TARGET_EL3RST,
 528    [ELNRNG]            = TARGET_ELNRNG,
 529    [EUNATCH]           = TARGET_EUNATCH,
 530    [ENOCSI]            = TARGET_ENOCSI,
 531    [EL2HLT]            = TARGET_EL2HLT,
 532    [EDEADLK]           = TARGET_EDEADLK,
 533    [ENOLCK]            = TARGET_ENOLCK,
 534    [EBADE]             = TARGET_EBADE,
 535    [EBADR]             = TARGET_EBADR,
 536    [EXFULL]            = TARGET_EXFULL,
 537    [ENOANO]            = TARGET_ENOANO,
 538    [EBADRQC]           = TARGET_EBADRQC,
 539    [EBADSLT]           = TARGET_EBADSLT,
 540    [EBFONT]            = TARGET_EBFONT,
 541    [ENOSTR]            = TARGET_ENOSTR,
 542    [ENODATA]           = TARGET_ENODATA,
 543    [ETIME]             = TARGET_ETIME,
 544    [ENOSR]             = TARGET_ENOSR,
 545    [ENONET]            = TARGET_ENONET,
 546    [ENOPKG]            = TARGET_ENOPKG,
 547    [EREMOTE]           = TARGET_EREMOTE,
 548    [ENOLINK]           = TARGET_ENOLINK,
 549    [EADV]              = TARGET_EADV,
 550    [ESRMNT]            = TARGET_ESRMNT,
 551    [ECOMM]             = TARGET_ECOMM,
 552    [EPROTO]            = TARGET_EPROTO,
 553    [EDOTDOT]           = TARGET_EDOTDOT,
 554    [EMULTIHOP]         = TARGET_EMULTIHOP,
 555    [EBADMSG]           = TARGET_EBADMSG,
 556    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
 557    [EOVERFLOW]         = TARGET_EOVERFLOW,
 558    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
 559    [EBADFD]            = TARGET_EBADFD,
 560    [EREMCHG]           = TARGET_EREMCHG,
 561    [ELIBACC]           = TARGET_ELIBACC,
 562    [ELIBBAD]           = TARGET_ELIBBAD,
 563    [ELIBSCN]           = TARGET_ELIBSCN,
 564    [ELIBMAX]           = TARGET_ELIBMAX,
 565    [ELIBEXEC]          = TARGET_ELIBEXEC,
 566    [EILSEQ]            = TARGET_EILSEQ,
 567    [ENOSYS]            = TARGET_ENOSYS,
 568    [ELOOP]             = TARGET_ELOOP,
 569    [ERESTART]          = TARGET_ERESTART,
 570    [ESTRPIPE]          = TARGET_ESTRPIPE,
 571    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
 572    [EUSERS]            = TARGET_EUSERS,
 573    [ENOTSOCK]          = TARGET_ENOTSOCK,
 574    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
 575    [EMSGSIZE]          = TARGET_EMSGSIZE,
 576    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
 577    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
 578    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
 579    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
 580    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
 581    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
 582    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
 583    [EADDRINUSE]        = TARGET_EADDRINUSE,
 584    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
 585    [ENETDOWN]          = TARGET_ENETDOWN,
 586    [ENETUNREACH]       = TARGET_ENETUNREACH,
 587    [ENETRESET]         = TARGET_ENETRESET,
 588    [ECONNABORTED]      = TARGET_ECONNABORTED,
 589    [ECONNRESET]        = TARGET_ECONNRESET,
 590    [ENOBUFS]           = TARGET_ENOBUFS,
 591    [EISCONN]           = TARGET_EISCONN,
 592    [ENOTCONN]          = TARGET_ENOTCONN,
 593    [EUCLEAN]           = TARGET_EUCLEAN,
 594    [ENOTNAM]           = TARGET_ENOTNAM,
 595    [ENAVAIL]           = TARGET_ENAVAIL,
 596    [EISNAM]            = TARGET_EISNAM,
 597    [EREMOTEIO]         = TARGET_EREMOTEIO,
 598    [EDQUOT]            = TARGET_EDQUOT,
 599    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
 600    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
 601    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
 602    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
 603    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
 604    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
 605    [EALREADY]          = TARGET_EALREADY,
 606    [EINPROGRESS]       = TARGET_EINPROGRESS,
 607    [ESTALE]            = TARGET_ESTALE,
 608    [ECANCELED]         = TARGET_ECANCELED,
 609    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
 610    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
 611#ifdef ENOKEY
 612    [ENOKEY]            = TARGET_ENOKEY,
 613#endif
 614#ifdef EKEYEXPIRED
 615    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
 616#endif
 617#ifdef EKEYREVOKED
 618    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
 619#endif
 620#ifdef EKEYREJECTED
 621    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
 622#endif
 623#ifdef EOWNERDEAD
 624    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
 625#endif
 626#ifdef ENOTRECOVERABLE
 627    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
 628#endif
 629#ifdef ENOMSG
 630    [ENOMSG]            = TARGET_ENOMSG,
 631#endif
  632#ifdef ERFKILL
 633    [ERFKILL]           = TARGET_ERFKILL,
 634#endif
 635#ifdef EHWPOISON
 636    [EHWPOISON]         = TARGET_EHWPOISON,
 637#endif
 638};
 639
 640static inline int host_to_target_errno(int err)
 641{
 642    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
 643        host_to_target_errno_table[err]) {
 644        return host_to_target_errno_table[err];
 645    }
 646    return err;
 647}
 648
 649static inline int target_to_host_errno(int err)
 650{
 651    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
 652        target_to_host_errno_table[err]) {
 653        return target_to_host_errno_table[err];
 654    }
 655    return err;
 656}
 657
 658static inline abi_long get_errno(abi_long ret)
 659{
 660    if (ret == -1)
 661        return -host_to_target_errno(errno);
 662    else
 663        return ret;
 664}
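/*
 * Typical use:  ret = get_errno(open(path, flags));
 * A failing host call (-1 with errno == ENOENT, say) becomes the guest value
 * -TARGET_ENOENT, while successful results pass through unchanged.
 */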
 665
 666const char *target_strerror(int err)
 667{
 668    if (err == TARGET_ERESTARTSYS) {
 669        return "To be restarted";
 670    }
 671    if (err == TARGET_QEMU_ESIGRETURN) {
 672        return "Successful exit from sigreturn";
 673    }
 674
 675    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
 676        return NULL;
 677    }
 678    return strerror(target_to_host_errno(err));
 679}
 680
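/*
 * The safe_##name wrappers generated below route the call through
 * safe_syscall(), a helper that (roughly) checks for a pending guest signal
 * immediately before trapping into the kernel; if one has arrived, the host
 * syscall is not issued and the wrapper fails with errno TARGET_ERESTARTSYS,
 * so get_errno() yields -TARGET_ERESTARTSYS and the guest syscall can be
 * restarted once the signal has been delivered.  This is what makes blocking
 * syscalls interruptible by guest signals without races.
 */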
 681#define safe_syscall0(type, name) \
 682static type safe_##name(void) \
 683{ \
 684    return safe_syscall(__NR_##name); \
 685}
 686
 687#define safe_syscall1(type, name, type1, arg1) \
 688static type safe_##name(type1 arg1) \
 689{ \
 690    return safe_syscall(__NR_##name, arg1); \
 691}
 692
 693#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
 694static type safe_##name(type1 arg1, type2 arg2) \
 695{ \
 696    return safe_syscall(__NR_##name, arg1, arg2); \
 697}
 698
 699#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
 700static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
 701{ \
 702    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
 703}
 704
 705#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
 706    type4, arg4) \
 707static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
 708{ \
 709    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
 710}
 711
 712#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
 713    type4, arg4, type5, arg5) \
 714static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
 715    type5 arg5) \
 716{ \
 717    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
 718}
 719
 720#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
 721    type4, arg4, type5, arg5, type6, arg6) \
 722static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
 723    type5 arg5, type6 arg6) \
 724{ \
 725    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
 726}
 727
 728safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 729safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
 730safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
 731              int, flags, mode_t, mode)
 732#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
 733safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
 734              struct rusage *, rusage)
 735#endif
 736safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
 737              int, options, struct rusage *, rusage)
 738safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
 739#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
 740    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
 741safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
 742              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
 743#endif
 744#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
 745safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
 746              struct timespec *, tsp, const sigset_t *, sigmask,
 747              size_t, sigsetsize)
 748#endif
 749safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
 750              int, maxevents, int, timeout, const sigset_t *, sigmask,
 751              size_t, sigsetsize)
 752#if defined(__NR_futex)
 753safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
 754              const struct timespec *,timeout,int *,uaddr2,int,val3)
 755#endif
 756#if defined(__NR_futex_time64)
 757safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
 758              const struct timespec *,timeout,int *,uaddr2,int,val3)
 759#endif
 760safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
 761safe_syscall2(int, kill, pid_t, pid, int, sig)
 762safe_syscall2(int, tkill, int, tid, int, sig)
 763safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
 764safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
 765safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
 766safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
 767              unsigned long, pos_l, unsigned long, pos_h)
 768safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
 769              unsigned long, pos_l, unsigned long, pos_h)
 770safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
 771              socklen_t, addrlen)
 772safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
 773              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
 774safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
 775              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
 776safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
 777safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
 778safe_syscall2(int, flock, int, fd, int, operation)
 779#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
 780safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
 781              const struct timespec *, uts, size_t, sigsetsize)
 782#endif
 783safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
 784              int, flags)
 785#if defined(TARGET_NR_nanosleep)
 786safe_syscall2(int, nanosleep, const struct timespec *, req,
 787              struct timespec *, rem)
 788#endif
 789#if defined(TARGET_NR_clock_nanosleep) || \
 790    defined(TARGET_NR_clock_nanosleep_time64)
 791safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
 792              const struct timespec *, req, struct timespec *, rem)
 793#endif
 794#ifdef __NR_ipc
 795#ifdef __s390x__
 796safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
 797              void *, ptr)
 798#else
 799safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
 800              void *, ptr, long, fifth)
 801#endif
 802#endif
 803#ifdef __NR_msgsnd
 804safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
 805              int, flags)
 806#endif
 807#ifdef __NR_msgrcv
 808safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
 809              long, msgtype, int, flags)
 810#endif
 811#ifdef __NR_semtimedop
 812safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
 813              unsigned, nsops, const struct timespec *, timeout)
 814#endif
 815#if defined(TARGET_NR_mq_timedsend) || \
 816    defined(TARGET_NR_mq_timedsend_time64)
 817safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
 818              size_t, len, unsigned, prio, const struct timespec *, timeout)
 819#endif
 820#if defined(TARGET_NR_mq_timedreceive) || \
 821    defined(TARGET_NR_mq_timedreceive_time64)
 822safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
 823              size_t, len, unsigned *, prio, const struct timespec *, timeout)
 824#endif
 825#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
 826safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
 827              int, outfd, loff_t *, poutoff, size_t, length,
 828              unsigned int, flags)
 829#endif
 830
 831/* We do ioctl like this rather than via safe_syscall3 to preserve the
 832 * "third argument might be integer or pointer or not present" behaviour of
 833 * the libc function.
 834 */
 835#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
 836/* Similarly for fcntl. Note that callers must always:
 837 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 838 *  use the flock64 struct rather than unsuffixed flock
 839 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 840 */
 841#ifdef __NR_fcntl64
 842#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
 843#else
 844#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
 845#endif
 846
 847static inline int host_to_target_sock_type(int host_type)
 848{
 849    int target_type;
 850
 851    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
 852    case SOCK_DGRAM:
 853        target_type = TARGET_SOCK_DGRAM;
 854        break;
 855    case SOCK_STREAM:
 856        target_type = TARGET_SOCK_STREAM;
 857        break;
 858    default:
 859        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
 860        break;
 861    }
 862
 863#if defined(SOCK_CLOEXEC)
 864    if (host_type & SOCK_CLOEXEC) {
 865        target_type |= TARGET_SOCK_CLOEXEC;
 866    }
 867#endif
 868
 869#if defined(SOCK_NONBLOCK)
 870    if (host_type & SOCK_NONBLOCK) {
 871        target_type |= TARGET_SOCK_NONBLOCK;
 872    }
 873#endif
 874
 875    return target_type;
 876}
 877
 878static abi_ulong target_brk;
 879static abi_ulong target_original_brk;
 880static abi_ulong brk_page;
 881
 882void target_set_brk(abi_ulong new_brk)
 883{
 884    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
 885    brk_page = HOST_PAGE_ALIGN(target_brk);
 886}
 887
 888//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
 889#define DEBUGF_BRK(message, args...)
 890
 891/* do_brk() must return target values and target errnos. */
 892abi_long do_brk(abi_ulong new_brk)
 893{
 894    abi_long mapped_addr;
 895    abi_ulong new_alloc_size;
 896
 897    /* brk pointers are always untagged */
 898
 899    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
 900
 901    if (!new_brk) {
 902        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
 903        return target_brk;
 904    }
 905    if (new_brk < target_original_brk) {
 906        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
 907                   target_brk);
 908        return target_brk;
 909    }
 910
 911    /* If the new brk is less than the highest page reserved to the
 912     * target heap allocation, set it and we're almost done...  */
 913    if (new_brk <= brk_page) {
 914        /* Heap contents are initialized to zero, as for anonymous
 915         * mapped pages.  */
 916        if (new_brk > target_brk) {
 917            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
 918        }
 919        target_brk = new_brk;
 920        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
 921        return target_brk;
 922    }
 923
 924    /* We need to allocate more memory after the brk... Note that
 925     * we don't use MAP_FIXED because that will map over the top of
 926     * any existing mapping (like the one with the host libc or qemu
 927     * itself); instead we treat "mapped but at wrong address" as
 928     * a failure and unmap again.
 929     */
 930    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
 931    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
 932                                        PROT_READ|PROT_WRITE,
 933                                        MAP_ANON|MAP_PRIVATE, 0, 0));
 934
 935    if (mapped_addr == brk_page) {
 936        /* Heap contents are initialized to zero, as for anonymous
 937         * mapped pages.  Technically the new pages are already
 938         * initialized to zero since they *are* anonymous mapped
 939         * pages, however we have to take care with the contents that
 940         * come from the remaining part of the previous page: it may
  941     * contain garbage data from earlier heap use (the heap may have
  942     * grown and then shrunk).  */
 943        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
 944
 945        target_brk = new_brk;
 946        brk_page = HOST_PAGE_ALIGN(target_brk);
 947        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
 948            target_brk);
 949        return target_brk;
 950    } else if (mapped_addr != -1) {
 951        /* Mapped but at wrong address, meaning there wasn't actually
 952         * enough space for this brk.
 953         */
 954        target_munmap(mapped_addr, new_alloc_size);
 955        mapped_addr = -1;
 956        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
 957    }
 958    else {
 959        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
 960    }
 961
 962#if defined(TARGET_ALPHA)
 963    /* We (partially) emulate OSF/1 on Alpha, which requires we
 964       return a proper errno, not an unchanged brk value.  */
 965    return -TARGET_ENOMEM;
 966#endif
 967    /* For everything else, return the previous break. */
 968    return target_brk;
 969}
 970
 971#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
 972    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
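/*
 * The guest fd_set is laid out as an array of abi_ulong words in guest byte
 * order: bit j of word i corresponds to file descriptor i * TARGET_ABI_BITS + j.
 * The helpers below therefore convert word by word with __get_user() and
 * __put_user() rather than copying the structure wholesale.
 */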
 973static inline abi_long copy_from_user_fdset(fd_set *fds,
 974                                            abi_ulong target_fds_addr,
 975                                            int n)
 976{
 977    int i, nw, j, k;
 978    abi_ulong b, *target_fds;
 979
 980    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
 981    if (!(target_fds = lock_user(VERIFY_READ,
 982                                 target_fds_addr,
 983                                 sizeof(abi_ulong) * nw,
 984                                 1)))
 985        return -TARGET_EFAULT;
 986
 987    FD_ZERO(fds);
 988    k = 0;
 989    for (i = 0; i < nw; i++) {
 990        /* grab the abi_ulong */
 991        __get_user(b, &target_fds[i]);
 992        for (j = 0; j < TARGET_ABI_BITS; j++) {
 993            /* check the bit inside the abi_ulong */
 994            if ((b >> j) & 1)
 995                FD_SET(k, fds);
 996            k++;
 997        }
 998    }
 999
1000    unlock_user(target_fds, target_fds_addr, 0);
1001
1002    return 0;
1003}
1004
1005static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1006                                                 abi_ulong target_fds_addr,
1007                                                 int n)
1008{
1009    if (target_fds_addr) {
1010        if (copy_from_user_fdset(fds, target_fds_addr, n))
1011            return -TARGET_EFAULT;
1012        *fds_ptr = fds;
1013    } else {
1014        *fds_ptr = NULL;
1015    }
1016    return 0;
1017}
1018
1019static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1020                                          const fd_set *fds,
1021                                          int n)
1022{
1023    int i, nw, j, k;
1024    abi_long v;
1025    abi_ulong *target_fds;
1026
1027    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1028    if (!(target_fds = lock_user(VERIFY_WRITE,
1029                                 target_fds_addr,
1030                                 sizeof(abi_ulong) * nw,
1031                                 0)))
1032        return -TARGET_EFAULT;
1033
1034    k = 0;
1035    for (i = 0; i < nw; i++) {
1036        v = 0;
1037        for (j = 0; j < TARGET_ABI_BITS; j++) {
1038            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1039            k++;
1040        }
1041        __put_user(v, &target_fds[i]);
1042    }
1043
1044    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1045
1046    return 0;
1047}
1048#endif
1049
1050#if defined(__alpha__)
1051#define HOST_HZ 1024
1052#else
1053#define HOST_HZ 100
1054#endif
1055
1056static inline abi_long host_to_target_clock_t(long ticks)
1057{
1058#if HOST_HZ == TARGET_HZ
1059    return ticks;
1060#else
1061    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1062#endif
1063}
1064
1065static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1066                                             const struct rusage *rusage)
1067{
1068    struct target_rusage *target_rusage;
1069
1070    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1071        return -TARGET_EFAULT;
1072    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1073    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1074    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1075    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1076    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1077    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1078    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1079    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1080    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1081    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1082    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1083    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1084    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1085    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1086    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1087    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1088    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1089    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1090    unlock_user_struct(target_rusage, target_addr, 1);
1091
1092    return 0;
1093}
1094
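/*
 * The rlimit converters below saturate values that do not fit: a guest limit
 * too large for the host rlim_t becomes RLIM_INFINITY, and a host limit that
 * is RLIM_INFINITY or does not fit in the target's abi_long becomes
 * TARGET_RLIM_INFINITY.
 */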
1095#ifdef TARGET_NR_setrlimit
1096static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1097{
1098    abi_ulong target_rlim_swap;
1099    rlim_t result;
1100    
1101    target_rlim_swap = tswapal(target_rlim);
1102    if (target_rlim_swap == TARGET_RLIM_INFINITY)
1103        return RLIM_INFINITY;
1104
1105    result = target_rlim_swap;
1106    if (target_rlim_swap != (rlim_t)result)
1107        return RLIM_INFINITY;
1108    
1109    return result;
1110}
1111#endif
1112
1113#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1114static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1115{
1116    abi_ulong target_rlim_swap;
1117    abi_ulong result;
1118    
1119    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1120        target_rlim_swap = TARGET_RLIM_INFINITY;
1121    else
1122        target_rlim_swap = rlim;
1123    result = tswapal(target_rlim_swap);
1124    
1125    return result;
1126}
1127#endif
1128
1129static inline int target_to_host_resource(int code)
1130{
1131    switch (code) {
1132    case TARGET_RLIMIT_AS:
1133        return RLIMIT_AS;
1134    case TARGET_RLIMIT_CORE:
1135        return RLIMIT_CORE;
1136    case TARGET_RLIMIT_CPU:
1137        return RLIMIT_CPU;
1138    case TARGET_RLIMIT_DATA:
1139        return RLIMIT_DATA;
1140    case TARGET_RLIMIT_FSIZE:
1141        return RLIMIT_FSIZE;
1142    case TARGET_RLIMIT_LOCKS:
1143        return RLIMIT_LOCKS;
1144    case TARGET_RLIMIT_MEMLOCK:
1145        return RLIMIT_MEMLOCK;
1146    case TARGET_RLIMIT_MSGQUEUE:
1147        return RLIMIT_MSGQUEUE;
1148    case TARGET_RLIMIT_NICE:
1149        return RLIMIT_NICE;
1150    case TARGET_RLIMIT_NOFILE:
1151        return RLIMIT_NOFILE;
1152    case TARGET_RLIMIT_NPROC:
1153        return RLIMIT_NPROC;
1154    case TARGET_RLIMIT_RSS:
1155        return RLIMIT_RSS;
1156    case TARGET_RLIMIT_RTPRIO:
1157        return RLIMIT_RTPRIO;
1158    case TARGET_RLIMIT_SIGPENDING:
1159        return RLIMIT_SIGPENDING;
1160    case TARGET_RLIMIT_STACK:
1161        return RLIMIT_STACK;
1162    default:
1163        return code;
1164    }
1165}
1166
1167static inline abi_long copy_from_user_timeval(struct timeval *tv,
1168                                              abi_ulong target_tv_addr)
1169{
1170    struct target_timeval *target_tv;
1171
1172    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1173        return -TARGET_EFAULT;
1174    }
1175
1176    __get_user(tv->tv_sec, &target_tv->tv_sec);
1177    __get_user(tv->tv_usec, &target_tv->tv_usec);
1178
1179    unlock_user_struct(target_tv, target_tv_addr, 0);
1180
1181    return 0;
1182}
1183
1184static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1185                                            const struct timeval *tv)
1186{
1187    struct target_timeval *target_tv;
1188
1189    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1190        return -TARGET_EFAULT;
1191    }
1192
1193    __put_user(tv->tv_sec, &target_tv->tv_sec);
1194    __put_user(tv->tv_usec, &target_tv->tv_usec);
1195
1196    unlock_user_struct(target_tv, target_tv_addr, 1);
1197
1198    return 0;
1199}
1200
1201#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1202static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1203                                                abi_ulong target_tv_addr)
1204{
1205    struct target__kernel_sock_timeval *target_tv;
1206
1207    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1208        return -TARGET_EFAULT;
1209    }
1210
1211    __get_user(tv->tv_sec, &target_tv->tv_sec);
1212    __get_user(tv->tv_usec, &target_tv->tv_usec);
1213
1214    unlock_user_struct(target_tv, target_tv_addr, 0);
1215
1216    return 0;
1217}
1218#endif
1219
1220static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1221                                              const struct timeval *tv)
1222{
1223    struct target__kernel_sock_timeval *target_tv;
1224
1225    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1226        return -TARGET_EFAULT;
1227    }
1228
1229    __put_user(tv->tv_sec, &target_tv->tv_sec);
1230    __put_user(tv->tv_usec, &target_tv->tv_usec);
1231
1232    unlock_user_struct(target_tv, target_tv_addr, 1);
1233
1234    return 0;
1235}
1236
1237#if defined(TARGET_NR_futex) || \
1238    defined(TARGET_NR_rt_sigtimedwait) || \
 1239    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1240    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1241    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1242    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1243    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1244    defined(TARGET_NR_timer_settime) || \
1245    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1246static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1247                                               abi_ulong target_addr)
1248{
1249    struct target_timespec *target_ts;
1250
1251    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1252        return -TARGET_EFAULT;
1253    }
1254    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1255    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1256    unlock_user_struct(target_ts, target_addr, 0);
1257    return 0;
1258}
1259#endif
1260
1261#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1262    defined(TARGET_NR_timer_settime64) || \
1263    defined(TARGET_NR_mq_timedsend_time64) || \
1264    defined(TARGET_NR_mq_timedreceive_time64) || \
1265    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1266    defined(TARGET_NR_clock_nanosleep_time64) || \
1267    defined(TARGET_NR_rt_sigtimedwait_time64) || \
1268    defined(TARGET_NR_utimensat) || \
1269    defined(TARGET_NR_utimensat_time64) || \
1270    defined(TARGET_NR_semtimedop_time64) || \
1271    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1272static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1273                                                 abi_ulong target_addr)
1274{
1275    struct target__kernel_timespec *target_ts;
1276
1277    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1278        return -TARGET_EFAULT;
1279    }
1280    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1281    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
 1282    /* In 32-bit guest mode this drops the padding half of tv_nsec. */
1283    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1284    unlock_user_struct(target_ts, target_addr, 0);
1285    return 0;
1286}
1287#endif
1288
1289static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1290                                               struct timespec *host_ts)
1291{
1292    struct target_timespec *target_ts;
1293
1294    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1295        return -TARGET_EFAULT;
1296    }
1297    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1298    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1299    unlock_user_struct(target_ts, target_addr, 1);
1300    return 0;
1301}
1302
1303static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1304                                                 struct timespec *host_ts)
1305{
1306    struct target__kernel_timespec *target_ts;
1307
1308    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1309        return -TARGET_EFAULT;
1310    }
1311    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1312    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1313    unlock_user_struct(target_ts, target_addr, 1);
1314    return 0;
1315}
1316
1317#if defined(TARGET_NR_gettimeofday)
1318static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1319                                             struct timezone *tz)
1320{
1321    struct target_timezone *target_tz;
1322
1323    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1324        return -TARGET_EFAULT;
1325    }
1326
1327    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1328    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1329
1330    unlock_user_struct(target_tz, target_tz_addr, 1);
1331
1332    return 0;
1333}
1334#endif
1335
1336#if defined(TARGET_NR_settimeofday)
1337static inline abi_long copy_from_user_timezone(struct timezone *tz,
1338                                               abi_ulong target_tz_addr)
1339{
1340    struct target_timezone *target_tz;
1341
1342    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1343        return -TARGET_EFAULT;
1344    }
1345
1346    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1347    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1348
1349    unlock_user_struct(target_tz, target_tz_addr, 0);
1350
1351    return 0;
1352}
1353#endif
1354
1355#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1356#include <mqueue.h>
1357
1358static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1359                                              abi_ulong target_mq_attr_addr)
1360{
1361    struct target_mq_attr *target_mq_attr;
1362
1363    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1364                          target_mq_attr_addr, 1))
1365        return -TARGET_EFAULT;
1366
1367    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1368    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1369    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1370    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1371
1372    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1373
1374    return 0;
1375}
1376
1377static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1378                                            const struct mq_attr *attr)
1379{
1380    struct target_mq_attr *target_mq_attr;
1381
1382    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1383                          target_mq_attr_addr, 0))
1384        return -TARGET_EFAULT;
1385
1386    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1387    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1388    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1389    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1390
1391    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1392
1393    return 0;
1394}
1395#endif
1396
1397#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1398/* do_select() must return target values and target errnos. */
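/*
 * A sketch of the approach: select() is implemented on top of the host
 * pselect6 syscall with a NULL signal mask.  The guest timeval is converted
 * to a timespec for the call, and on success the fd sets and the (possibly
 * kernel-updated) timeout are converted back and copied out to the guest.
 */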
1399static abi_long do_select(int n,
1400                          abi_ulong rfd_addr, abi_ulong wfd_addr,
1401                          abi_ulong efd_addr, abi_ulong target_tv_addr)
1402{
1403    fd_set rfds, wfds, efds;
1404    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1405    struct timeval tv;
1406    struct timespec ts, *ts_ptr;
1407    abi_long ret;
1408
1409    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1410    if (ret) {
1411        return ret;
1412    }
1413    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1414    if (ret) {
1415        return ret;
1416    }
1417    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1418    if (ret) {
1419        return ret;
1420    }
1421
1422    if (target_tv_addr) {
1423        if (copy_from_user_timeval(&tv, target_tv_addr))
1424            return -TARGET_EFAULT;
1425        ts.tv_sec = tv.tv_sec;
1426        ts.tv_nsec = tv.tv_usec * 1000;
1427        ts_ptr = &ts;
1428    } else {
1429        ts_ptr = NULL;
1430    }
1431
1432    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1433                                  ts_ptr, NULL));
1434
1435    if (!is_error(ret)) {
1436        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1437            return -TARGET_EFAULT;
1438        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1439            return -TARGET_EFAULT;
1440        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1441            return -TARGET_EFAULT;
1442
1443        if (target_tv_addr) {
1444            tv.tv_sec = ts.tv_sec;
1445            tv.tv_usec = ts.tv_nsec / 1000;
1446            if (copy_to_user_timeval(target_tv_addr, &tv)) {
1447                return -TARGET_EFAULT;
1448            }
1449        }
1450    }
1451
1452    return ret;
1453}
1454
1455#if defined(TARGET_WANT_OLD_SYS_SELECT)
1456static abi_long do_old_select(abi_ulong arg1)
1457{
1458    struct target_sel_arg_struct *sel;
1459    abi_ulong inp, outp, exp, tvp;
1460    long nsel;
1461
1462    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1463        return -TARGET_EFAULT;
1464    }
1465
1466    nsel = tswapal(sel->n);
1467    inp = tswapal(sel->inp);
1468    outp = tswapal(sel->outp);
1469    exp = tswapal(sel->exp);
1470    tvp = tswapal(sel->tvp);
1471
1472    unlock_user_struct(sel, arg1, 0);
1473
1474    return do_select(nsel, inp, outp, exp, tvp);
1475}
1476#endif
1477#endif
1478
1479#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1480static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1481                            abi_long arg4, abi_long arg5, abi_long arg6,
1482                            bool time64)
1483{
1484    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1485    fd_set rfds, wfds, efds;
1486    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1487    struct timespec ts, *ts_ptr;
1488    abi_long ret;
1489
1490    /*
1491     * The 6th arg is actually two args smashed together,
1492     * so we cannot use the C library.
1493     */
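    /*
     * Concretely, arg6 points at two abi_ulongs in guest memory: the first
     * (arg7[0]) is the guest address of the sigset and the second (arg7[1])
     * its size, which is checked against sizeof(target_sigset_t) below.
     */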
1494    sigset_t set;
1495    struct {
1496        sigset_t *set;
1497        size_t size;
1498    } sig, *sig_ptr;
1499
1500    abi_ulong arg_sigset, arg_sigsize, *arg7;
1501    target_sigset_t *target_sigset;
1502
1503    n = arg1;
1504    rfd_addr = arg2;
1505    wfd_addr = arg3;
1506    efd_addr = arg4;
1507    ts_addr = arg5;
1508
1509    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1510    if (ret) {
1511        return ret;
1512    }
1513    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1514    if (ret) {
1515        return ret;
1516    }
1517    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1518    if (ret) {
1519        return ret;
1520    }
1521
1522    /*
1523     * This takes a timespec, and not a timeval, so we cannot
1524     * use the do_select() helper ...
1525     */
1526    if (ts_addr) {
1527        if (time64) {
1528            if (target_to_host_timespec64(&ts, ts_addr)) {
1529                return -TARGET_EFAULT;
1530            }
1531        } else {
1532            if (target_to_host_timespec(&ts, ts_addr)) {
1533                return -TARGET_EFAULT;
1534            }
1535        }
1536        ts_ptr = &ts;
1537    } else {
1538        ts_ptr = NULL;
1539    }
1540
1541    /* Extract the two packed args for the sigset */
1542    if (arg6) {
1543        sig_ptr = &sig;
1544        sig.size = SIGSET_T_SIZE;
1545
1546        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1547        if (!arg7) {
1548            return -TARGET_EFAULT;
1549        }
1550        arg_sigset = tswapal(arg7[0]);
1551        arg_sigsize = tswapal(arg7[1]);
1552        unlock_user(arg7, arg6, 0);
1553
1554        if (arg_sigset) {
1555            sig.set = &set;
1556            if (arg_sigsize != sizeof(*target_sigset)) {
1557                /* Like the kernel, we enforce correct size sigsets */
1558                return -TARGET_EINVAL;
1559            }
1560            target_sigset = lock_user(VERIFY_READ, arg_sigset,
1561                                      sizeof(*target_sigset), 1);
1562            if (!target_sigset) {
1563                return -TARGET_EFAULT;
1564            }
1565            target_to_host_sigset(&set, target_sigset);
1566            unlock_user(target_sigset, arg_sigset, 0);
1567        } else {
1568            sig.set = NULL;
1569        }
1570    } else {
1571        sig_ptr = NULL;
1572    }
1573
1574    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1575                                  ts_ptr, sig_ptr));
1576
1577    if (!is_error(ret)) {
1578        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1579            return -TARGET_EFAULT;
1580        }
1581        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1582            return -TARGET_EFAULT;
1583        }
1584        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1585            return -TARGET_EFAULT;
1586        }
1587        if (time64) {
1588            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1589                return -TARGET_EFAULT;
1590            }
1591        } else {
1592            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1593                return -TARGET_EFAULT;
1594            }
1595        }
1596    }
1597    return ret;
1598}
1599#endif
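
    /*
     * Guest-side shape of the packed sixth argument that do_pselect6()
     * unpacks above (a minimal, illustrative sketch):
     *
     *     struct {
     *         abi_ulong sigset;      // guest pointer to a target_sigset_t
     *         abi_ulong sigsetsize;  // must equal sizeof(target_sigset_t)
     *     };
     *
     * which corresponds to the two abi_ulong values read via arg7[0] and
     * arg7[1].
     */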
1600
1601#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1602    defined(TARGET_NR_ppoll_time64)
1603static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1604                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1605{
1606    struct target_pollfd *target_pfd;
1607    unsigned int nfds = arg2;
1608    struct pollfd *pfd;
1609    unsigned int i;
1610    abi_long ret;
1611
1612    pfd = NULL;
1613    target_pfd = NULL;
1614    if (nfds) {
1615        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1616            return -TARGET_EINVAL;
1617        }
1618        target_pfd = lock_user(VERIFY_WRITE, arg1,
1619                               sizeof(struct target_pollfd) * nfds, 1);
1620        if (!target_pfd) {
1621            return -TARGET_EFAULT;
1622        }
1623
1624        pfd = alloca(sizeof(struct pollfd) * nfds);
1625        for (i = 0; i < nfds; i++) {
1626            pfd[i].fd = tswap32(target_pfd[i].fd);
1627            pfd[i].events = tswap16(target_pfd[i].events);
1628        }
1629    }
1630    if (ppoll) {
1631        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1632        target_sigset_t *target_set;
1633        sigset_t _set, *set = &_set;
1634
1635        if (arg3) {
1636            if (time64) {
1637                if (target_to_host_timespec64(timeout_ts, arg3)) {
1638                    unlock_user(target_pfd, arg1, 0);
1639                    return -TARGET_EFAULT;
1640                }
1641            } else {
1642                if (target_to_host_timespec(timeout_ts, arg3)) {
1643                    unlock_user(target_pfd, arg1, 0);
1644                    return -TARGET_EFAULT;
1645                }
1646            }
1647        } else {
1648            timeout_ts = NULL;
1649        }
1650
1651        if (arg4) {
1652            if (arg5 != sizeof(target_sigset_t)) {
1653                unlock_user(target_pfd, arg1, 0);
1654                return -TARGET_EINVAL;
1655            }
1656
1657            target_set = lock_user(VERIFY_READ, arg4,
1658                                   sizeof(target_sigset_t), 1);
1659            if (!target_set) {
1660                unlock_user(target_pfd, arg1, 0);
1661                return -TARGET_EFAULT;
1662            }
1663            target_to_host_sigset(set, target_set);
1664        } else {
1665            set = NULL;
1666        }
1667
1668        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1669                                   set, SIGSET_T_SIZE));
1670
1671        if (!is_error(ret) && arg3) {
1672            if (time64) {
1673                if (host_to_target_timespec64(arg3, timeout_ts)) {
1674                    return -TARGET_EFAULT;
1675                }
1676            } else {
1677                if (host_to_target_timespec(arg3, timeout_ts)) {
1678                    return -TARGET_EFAULT;
1679                }
1680            }
1681        }
1682        if (arg4) {
1683            unlock_user(target_set, arg4, 0);
1684        }
1685    } else {
1686        struct timespec ts, *pts;
1687
1688        if (arg3 >= 0) {
1689            /* Convert ms to secs, ns */
1690            ts.tv_sec = arg3 / 1000;
1691            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1692            pts = &ts;
1693        } else {
1694            /* A negative poll() timeout means "infinite" */
1695            pts = NULL;
1696        }
1697        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1698    }
1699
1700    if (!is_error(ret)) {
1701        for (i = 0; i < nfds; i++) {
1702            target_pfd[i].revents = tswap16(pfd[i].revents);
1703        }
1704    }
1705    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1706    return ret;
1707}
1708#endif
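
    /*
     * Worked example for the plain poll() path above (illustrative only):
     * a guest timeout of 1500 ms becomes the host timespec
     * { .tv_sec = 1, .tv_nsec = 500000000 }, while any negative timeout
     * maps to a NULL timespec, i.e. "block forever".
     */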
1709
1710static abi_long do_pipe2(int host_pipe[], int flags)
1711{
1712#ifdef CONFIG_PIPE2
1713    return pipe2(host_pipe, flags);
1714#else
1715    return -ENOSYS;
1716#endif
1717}
1718
1719static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1720                        int flags, int is_pipe2)
1721{
1722    int host_pipe[2];
1723    abi_long ret;
1724    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1725
1726    if (is_error(ret))
1727        return get_errno(ret);
1728
1729    /* Several targets have special calling conventions for the original
1730       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1731    if (!is_pipe2) {
1732#if defined(TARGET_ALPHA)
1733        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1734        return host_pipe[0];
1735#elif defined(TARGET_MIPS)
1736        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1737        return host_pipe[0];
1738#elif defined(TARGET_SH4)
1739        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1740        return host_pipe[0];
1741#elif defined(TARGET_SPARC)
1742        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1743        return host_pipe[0];
1744#endif
1745    }
1746
1747    if (put_user_s32(host_pipe[0], pipedes)
1748        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1749        return -TARGET_EFAULT;
1750    return get_errno(ret);
1751}
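
    /*
     * Guest-side view of the generic convention handled above (a minimal,
     * illustrative sketch):
     *
     *     int fds[2];
     *     pipe(fds);   // fds[0] = read end, fds[1] = write end
     *
     * Both ends are stored through put_user_s32(); on Alpha, MIPS, SH4 and
     * SPARC the original pipe syscall instead returns the read end as the
     * syscall result and the write end in a second register, which is why
     * cpu_env is patched directly for !is_pipe2.
     */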
1752
1753static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1754                                              abi_ulong target_addr,
1755                                              socklen_t len)
1756{
1757    struct target_ip_mreqn *target_smreqn;
1758
1759    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1760    if (!target_smreqn)
1761        return -TARGET_EFAULT;
1762    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1763    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1764    if (len == sizeof(struct target_ip_mreqn))
1765        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1766    unlock_user(target_smreqn, target_addr, 0);
1767
1768    return 0;
1769}
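
    /*
     * For reference, the host structures filled in above, as defined by
     * the Linux/libc headers:
     *
     *     struct ip_mreq  { struct in_addr imr_multiaddr;
     *                       struct in_addr imr_interface; };
     *     struct ip_mreqn { struct in_addr imr_multiaddr;
     *                       struct in_addr imr_address;
     *                       int            imr_ifindex;   };
     *
     * so imr_ifindex is only converted when the guest passed the larger
     * target_ip_mreqn form.
     */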
1770
1771static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1772                                               abi_ulong target_addr,
1773                                               socklen_t len)
1774{
1775    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1776    sa_family_t sa_family;
1777    struct target_sockaddr *target_saddr;
1778
1779    if (fd_trans_target_to_host_addr(fd)) {
1780        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1781    }
1782
1783    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1784    if (!target_saddr)
1785        return -TARGET_EFAULT;
1786
1787    sa_family = tswap16(target_saddr->sa_family);
1788
1789    /* Oops. The caller might send an incomplete sun_path; sun_path
1790     * must be terminated by \0 (see the manual page), but
1791     * unfortunately it is quite common to specify sockaddr_un
1792     * length as "strlen(x->sun_path)" while it should be
1793     * "strlen(...) + 1". We'll fix that here if needed.
1794     * The Linux kernel has a similar feature.
1795     */
1796
1797    if (sa_family == AF_UNIX) {
1798        if (len < unix_maxlen && len > 0) {
1799            char *cp = (char*)target_saddr;
1800
1801            if (cp[len - 1] && !cp[len])
1802                len++;
1803        }
1804        if (len > unix_maxlen)
1805            len = unix_maxlen;
1806    }
1807
1808    memcpy(addr, target_saddr, len);
1809    addr->sa_family = sa_family;
1810    if (sa_family == AF_NETLINK) {
1811        struct sockaddr_nl *nladdr;
1812
1813        nladdr = (struct sockaddr_nl *)addr;
1814        nladdr->nl_pid = tswap32(nladdr->nl_pid);
1815        nladdr->nl_groups = tswap32(nladdr->nl_groups);
1816    } else if (sa_family == AF_PACKET) {
1817        struct target_sockaddr_ll *lladdr;
1818
1819        lladdr = (struct target_sockaddr_ll *)addr;
1820        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1821        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1822    }
1823    unlock_user(target_saddr, target_addr, 0);
1824
1825    return 0;
1826}
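
    /*
     * Worked example of the AF_UNIX length fixup above (illustrative
     * only): a guest binding to "/tmp/sock" that passes
     * len = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock")
     * omits the terminating NUL; if the last supplied byte is non-zero and
     * the byte just past it is zero, len is bumped by one so the host sees
     * a properly terminated sun_path.
     */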
1827
1828static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1829                                               struct sockaddr *addr,
1830                                               socklen_t len)
1831{
1832    struct target_sockaddr *target_saddr;
1833
1834    if (len == 0) {
1835        return 0;
1836    }
1837    assert(addr);
1838
1839    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1840    if (!target_saddr)
1841        return -TARGET_EFAULT;
1842    memcpy(target_saddr, addr, len);
1843    if (len >= offsetof(struct target_sockaddr, sa_family) +
1844        sizeof(target_saddr->sa_family)) {
1845        target_saddr->sa_family = tswap16(addr->sa_family);
1846    }
1847    if (addr->sa_family == AF_NETLINK &&
1848        len >= sizeof(struct target_sockaddr_nl)) {
1849        struct target_sockaddr_nl *target_nl =
1850               (struct target_sockaddr_nl *)target_saddr;
1851        target_nl->nl_pid = tswap32(target_nl->nl_pid);
1852        target_nl->nl_groups = tswap32(target_nl->nl_groups);
1853    } else if (addr->sa_family == AF_PACKET) {
1854        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1855        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1856        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1857    } else if (addr->sa_family == AF_INET6 &&
1858               len >= sizeof(struct target_sockaddr_in6)) {
1859        struct target_sockaddr_in6 *target_in6 =
1860               (struct target_sockaddr_in6 *)target_saddr;
1861        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1862    }
1863    unlock_user(target_saddr, target_addr, len);
1864
1865    return 0;
1866}
1867
1868static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1869                                           struct target_msghdr *target_msgh)
1870{
1871    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1872    abi_long msg_controllen;
1873    abi_ulong target_cmsg_addr;
1874    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1875    socklen_t space = 0;
1876
1877    msg_controllen = tswapal(target_msgh->msg_controllen);
1878    if (msg_controllen < sizeof(struct target_cmsghdr))
1879        goto the_end;
1880    target_cmsg_addr = tswapal(target_msgh->msg_control);
1881    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1882    target_cmsg_start = target_cmsg;
1883    if (!target_cmsg)
1884        return -TARGET_EFAULT;
1885
1886    while (cmsg && target_cmsg) {
1887        void *data = CMSG_DATA(cmsg);
1888        void *target_data = TARGET_CMSG_DATA(target_cmsg);
1889
1890        int len = tswapal(target_cmsg->cmsg_len)
1891            - sizeof(struct target_cmsghdr);
1892
1893        space += CMSG_SPACE(len);
1894        if (space > msgh->msg_controllen) {
1895            space -= CMSG_SPACE(len);
1896            /* This is a QEMU bug, since we allocated the payload
1897             * area ourselves (unlike overflow in host-to-target
1898             * conversion, which is just the guest giving us a buffer
1899             * that's too small). It can't happen for the payload types
1900             * we currently support; if it becomes an issue in future
1901             * we would need to improve our allocation strategy to
1902             * something more intelligent than "twice the size of the
1903             * target buffer we're reading from".
1904             */
1905            qemu_log_mask(LOG_UNIMP,
1906                          ("Unsupported ancillary data %d/%d: "
1907                           "unhandled msg size\n"),
1908                          tswap32(target_cmsg->cmsg_level),
1909                          tswap32(target_cmsg->cmsg_type));
1910            break;
1911        }
1912
1913        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1914            cmsg->cmsg_level = SOL_SOCKET;
1915        } else {
1916            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1917        }
1918        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1919        cmsg->cmsg_len = CMSG_LEN(len);
1920
1921        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1922            int *fd = (int *)data;
1923            int *target_fd = (int *)target_data;
1924            int i, numfds = len / sizeof(int);
1925
1926            for (i = 0; i < numfds; i++) {
1927                __get_user(fd[i], target_fd + i);
1928            }
1929        } else if (cmsg->cmsg_level == SOL_SOCKET
1930               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1931            struct ucred *cred = (struct ucred *)data;
1932            struct target_ucred *target_cred =
1933                (struct target_ucred *)target_data;
1934
1935            __get_user(cred->pid, &target_cred->pid);
1936            __get_user(cred->uid, &target_cred->uid);
1937            __get_user(cred->gid, &target_cred->gid);
1938        } else {
1939            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1940                          cmsg->cmsg_level, cmsg->cmsg_type);
1941            memcpy(data, target_data, len);
1942        }
1943
1944        cmsg = CMSG_NXTHDR(msgh, cmsg);
1945        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1946                                         target_cmsg_start);
1947    }
1948    unlock_user(target_cmsg, target_cmsg_addr, 0);
1949 the_end:
1950    msgh->msg_controllen = space;
1951    return 0;
1952}
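
    /*
     * Worked example for the SCM_RIGHTS case above (illustrative only):
     * a guest sendmsg() passing two file descriptors yields
     * len = cmsg_len - sizeof(struct target_cmsghdr) = 2 * sizeof(int),
     * so numfds = 2 and each target int is byte-swapped into the host
     * control buffer with __get_user() before the real syscall runs.
     */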
1953
1954static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1955                                           struct msghdr *msgh)
1956{
1957    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1958    abi_long msg_controllen;
1959    abi_ulong target_cmsg_addr;
1960    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1961    socklen_t space = 0;
1962
1963    msg_controllen = tswapal(target_msgh->msg_controllen);
1964    if (msg_controllen < sizeof(struct target_cmsghdr))
1965        goto the_end;
1966    target_cmsg_addr = tswapal(target_msgh->msg_control);
1967    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1968    target_cmsg_start = target_cmsg;
1969    if (!target_cmsg)
1970        return -TARGET_EFAULT;
1971
1972    while (cmsg && target_cmsg) {
1973        void *data = CMSG_DATA(cmsg);
1974        void *target_data = TARGET_CMSG_DATA(target_cmsg);
1975
1976        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1977        int tgt_len, tgt_space;
1978
1979        /* We never copy a half-header but may copy half-data;
1980         * this is Linux's behaviour in put_cmsg(). Note that
1981         * truncation here is a guest problem (which we report
1982         * to the guest via the CTRUNC bit), unlike truncation
1983         * in target_to_host_cmsg, which is a QEMU bug.
1984         */
1985        if (msg_controllen < sizeof(struct target_cmsghdr)) {
1986            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1987            break;
1988        }
1989
1990        if (cmsg->cmsg_level == SOL_SOCKET) {
1991            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1992        } else {
1993            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1994        }
1995        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1996
1997        /* Payload types which need a different size of payload on
1998         * the target must adjust tgt_len here.
1999         */
2000        tgt_len = len;
2001        switch (cmsg->cmsg_level) {
2002        case SOL_SOCKET:
2003            switch (cmsg->cmsg_type) {
2004            case SO_TIMESTAMP:
2005                tgt_len = sizeof(struct target_timeval);
2006                break;
2007            default:
2008                break;
2009            }
2010            break;
2011        default:
2012            break;
2013        }
2014
2015        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2016            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2017            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2018        }
2019
2020        /* We must now copy-and-convert len bytes of payload
2021         * into tgt_len bytes of destination space. Bear in mind
2022         * that in both source and destination we may be dealing
2023         * with a truncated value!
2024         */
2025        switch (cmsg->cmsg_level) {
2026        case SOL_SOCKET:
2027            switch (cmsg->cmsg_type) {
2028            case SCM_RIGHTS:
2029            {
2030                int *fd = (int *)data;
2031                int *target_fd = (int *)target_data;
2032                int i, numfds = tgt_len / sizeof(int);
2033
2034                for (i = 0; i < numfds; i++) {
2035                    __put_user(fd[i], target_fd + i);
2036                }
2037                break;
2038            }
2039            case SO_TIMESTAMP:
2040            {
2041                struct timeval *tv = (struct timeval *)data;
2042                struct target_timeval *target_tv =
2043                    (struct target_timeval *)target_data;
2044
2045                if (len != sizeof(struct timeval) ||
2046                    tgt_len != sizeof(struct target_timeval)) {
2047                    goto unimplemented;
2048                }
2049
2050                /* copy struct timeval to target */
2051                __put_user(tv->tv_sec, &target_tv->tv_sec);
2052                __put_user(tv->tv_usec, &target_tv->tv_usec);
2053                break;
2054            }
2055            case SCM_CREDENTIALS:
2056            {
2057                struct ucred *cred = (struct ucred *)data;
2058                struct target_ucred *target_cred =
2059                    (struct target_ucred *)target_data;
2060
2061                __put_user(cred->pid, &target_cred->pid);
2062                __put_user(cred->uid, &target_cred->uid);
2063                __put_user(cred->gid, &target_cred->gid);
2064                break;
2065            }
2066            default:
2067                goto unimplemented;
2068            }
2069            break;
2070
2071        case SOL_IP:
2072            switch (cmsg->cmsg_type) {
2073            case IP_TTL:
2074            {
2075                uint32_t *v = (uint32_t *)data;
2076                uint32_t *t_int = (uint32_t *)target_data;
2077
2078                if (len != sizeof(uint32_t) ||
2079                    tgt_len != sizeof(uint32_t)) {
2080                    goto unimplemented;
2081                }
2082                __put_user(*v, t_int);
2083                break;
2084            }
2085            case IP_RECVERR:
2086            {
2087                struct errhdr_t {
2088                   struct sock_extended_err ee;
2089                   struct sockaddr_in offender;
2090                };
2091                struct errhdr_t *errh = (struct errhdr_t *)data;
2092                struct errhdr_t *target_errh =
2093                    (struct errhdr_t *)target_data;
2094
2095                if (len != sizeof(struct errhdr_t) ||
2096                    tgt_len != sizeof(struct errhdr_t)) {
2097                    goto unimplemented;
2098                }
2099                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2100                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2101                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2102                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2103                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2104                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2105                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2106                host_to_target_sockaddr((unsigned long) &target_errh->offender,
2107                    (void *) &errh->offender, sizeof(errh->offender));
2108                break;
2109            }
2110            default:
2111                goto unimplemented;
2112            }
2113            break;
2114
2115        case SOL_IPV6:
2116            switch (cmsg->cmsg_type) {
2117            case IPV6_HOPLIMIT:
2118            {
2119                uint32_t *v = (uint32_t *)data;
2120                uint32_t *t_int = (uint32_t *)target_data;
2121
2122                if (len != sizeof(uint32_t) ||
2123                    tgt_len != sizeof(uint32_t)) {
2124                    goto unimplemented;
2125                }
2126                __put_user(*v, t_int);
2127                break;
2128            }
2129            case IPV6_RECVERR:
2130            {
2131                struct errhdr6_t {
2132                   struct sock_extended_err ee;
2133                   struct sockaddr_in6 offender;
2134                };
2135                struct errhdr6_t *errh = (struct errhdr6_t *)data;
2136                struct errhdr6_t *target_errh =
2137                    (struct errhdr6_t *)target_data;
2138
2139                if (len != sizeof(struct errhdr6_t) ||
2140                    tgt_len != sizeof(struct errhdr6_t)) {
2141                    goto unimplemented;
2142                }
2143                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2144                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2145                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2146                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2147                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2148                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2149                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2150                host_to_target_sockaddr((unsigned long) &target_errh->offender,
2151                    (void *) &errh->offender, sizeof(errh->offender));
2152                break;
2153            }
2154            default:
2155                goto unimplemented;
2156            }
2157            break;
2158
2159        default:
2160        unimplemented:
2161            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2162                          cmsg->cmsg_level, cmsg->cmsg_type);
2163            memcpy(target_data, data, MIN(len, tgt_len));
2164            if (tgt_len > len) {
2165                memset(target_data + len, 0, tgt_len - len);
2166            }
2167        }
2168
2169        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2170        tgt_space = TARGET_CMSG_SPACE(tgt_len);
2171        if (msg_controllen < tgt_space) {
2172            tgt_space = msg_controllen;
2173        }
2174        msg_controllen -= tgt_space;
2175        space += tgt_space;
2176        cmsg = CMSG_NXTHDR(msgh, cmsg);
2177        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2178                                         target_cmsg_start);
2179    }
2180    unlock_user(target_cmsg, target_cmsg_addr, space);
2181 the_end:
2182    target_msgh->msg_controllen = tswapal(space);
2183    return 0;
2184}
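
    /*
     * Worked example of the len/tgt_len handling above (illustrative;
     * exact sizes depend on the host and target ABIs): for SO_TIMESTAMP
     * on a 64-bit host the kernel payload is a 16-byte struct timeval,
     * while a 32-bit guest expects an 8-byte struct target_timeval, so
     * tgt_len is set to sizeof(struct target_timeval) and the seconds and
     * microseconds are copied field by field; if even that does not fit in
     * the remaining control buffer, MSG_CTRUNC is reported just as Linux's
     * put_cmsg() would.
     */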
2185
2186/* do_setsockopt() must return target values and target errnos. */
2187static abi_long do_setsockopt(int sockfd, int level, int optname,
2188                              abi_ulong optval_addr, socklen_t optlen)
2189{
2190    abi_long ret;
2191    int val;
2192    struct ip_mreqn *ip_mreq;
2193    struct ip_mreq_source *ip_mreq_source;
2194
2195    switch(level) {
2196    case SOL_TCP:
2197    case SOL_UDP:
2198        /* TCP and UDP options all take an 'int' value.  */
2199        if (optlen < sizeof(uint32_t))
2200            return -TARGET_EINVAL;
2201
2202        if (get_user_u32(val, optval_addr))
2203            return -TARGET_EFAULT;
2204        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2205        break;
2206    case SOL_IP:
2207        switch(optname) {
2208        case IP_TOS:
2209        case IP_TTL:
2210        case IP_HDRINCL:
2211        case IP_ROUTER_ALERT:
2212        case IP_RECVOPTS:
2213        case IP_RETOPTS:
2214        case IP_PKTINFO:
2215        case IP_MTU_DISCOVER:
2216        case IP_RECVERR:
2217        case IP_RECVTTL:
2218        case IP_RECVTOS:
2219#ifdef IP_FREEBIND
2220        case IP_FREEBIND:
2221#endif
2222        case IP_MULTICAST_TTL:
2223        case IP_MULTICAST_LOOP:
2224            val = 0;
2225            if (optlen >= sizeof(uint32_t)) {
2226                if (get_user_u32(val, optval_addr))
2227                    return -TARGET_EFAULT;
2228            } else if (optlen >= 1) {
2229                if (get_user_u8(val, optval_addr))
2230                    return -TARGET_EFAULT;
2231            }
2232            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2233            break;
2234        case IP_ADD_MEMBERSHIP:
2235        case IP_DROP_MEMBERSHIP:
2236            if (optlen < sizeof (struct target_ip_mreq) ||
2237                optlen > sizeof (struct target_ip_mreqn))
2238                return -TARGET_EINVAL;
2239
2240            ip_mreq = (struct ip_mreqn *) alloca(optlen);
2241            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2242            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2243            break;
2244
2245        case IP_BLOCK_SOURCE:
2246        case IP_UNBLOCK_SOURCE:
2247        case IP_ADD_SOURCE_MEMBERSHIP:
2248        case IP_DROP_SOURCE_MEMBERSHIP:
2249            if (optlen != sizeof (struct target_ip_mreq_source))
2250                return -TARGET_EINVAL;
2251
2252            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!ip_mreq_source) {
                    return -TARGET_EFAULT;
                }
2253            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2254            unlock_user(ip_mreq_source, optval_addr, 0);
2255            break;
2256
2257        default:
2258            goto unimplemented;
2259        }
2260        break;
2261    case SOL_IPV6:
2262        switch (optname) {
2263        case IPV6_MTU_DISCOVER:
2264        case IPV6_MTU:
2265        case IPV6_V6ONLY:
2266        case IPV6_RECVPKTINFO:
2267        case IPV6_UNICAST_HOPS:
2268        case IPV6_MULTICAST_HOPS:
2269        case IPV6_MULTICAST_LOOP:
2270        case IPV6_RECVERR:
2271        case IPV6_RECVHOPLIMIT:
2272        case IPV6_2292HOPLIMIT:
2273        case IPV6_CHECKSUM:
2274        case IPV6_ADDRFORM:
2275        case IPV6_2292PKTINFO:
2276        case IPV6_RECVTCLASS:
2277        case IPV6_RECVRTHDR:
2278        case IPV6_2292RTHDR:
2279        case IPV6_RECVHOPOPTS:
2280        case IPV6_2292HOPOPTS:
2281        case IPV6_RECVDSTOPTS:
2282        case IPV6_2292DSTOPTS:
2283        case IPV6_TCLASS:
2284        case IPV6_ADDR_PREFERENCES:
2285#ifdef IPV6_RECVPATHMTU
2286        case IPV6_RECVPATHMTU:
2287#endif
2288#ifdef IPV6_TRANSPARENT
2289        case IPV6_TRANSPARENT:
2290#endif
2291#ifdef IPV6_FREEBIND
2292        case IPV6_FREEBIND:
2293#endif
2294#ifdef IPV6_RECVORIGDSTADDR
2295        case IPV6_RECVORIGDSTADDR:
2296#endif
2297            val = 0;
2298            if (optlen < sizeof(uint32_t)) {
2299                return -TARGET_EINVAL;
2300            }
2301            if (get_user_u32(val, optval_addr)) {
2302                return -TARGET_EFAULT;
2303            }
2304            ret = get_errno(setsockopt(sockfd, level, optname,
2305                                       &val, sizeof(val)));
2306            break;
2307        case IPV6_PKTINFO:
2308        {
2309            struct in6_pktinfo pki;
2310
2311            if (optlen < sizeof(pki)) {
2312                return -TARGET_EINVAL;
2313            }
2314
2315            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2316                return -TARGET_EFAULT;
2317            }
2318
2319            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2320
2321            ret = get_errno(setsockopt(sockfd, level, optname,
2322                                       &pki, sizeof(pki)));
2323            break;
2324        }
2325        case IPV6_ADD_MEMBERSHIP:
2326        case IPV6_DROP_MEMBERSHIP:
2327        {
2328            struct ipv6_mreq ipv6mreq;
2329
2330            if (optlen < sizeof(ipv6mreq)) {
2331                return -TARGET_EINVAL;
2332            }
2333
2334            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2335                return -TARGET_EFAULT;
2336            }
2337
2338            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2339
2340            ret = get_errno(setsockopt(sockfd, level, optname,
2341                                       &ipv6mreq, sizeof(ipv6mreq)));
2342            break;
2343        }
2344        default:
2345            goto unimplemented;
2346        }
2347        break;
2348    case SOL_ICMPV6:
2349        switch (optname) {
2350        case ICMPV6_FILTER:
2351        {
2352            struct icmp6_filter icmp6f;
2353
2354            if (optlen > sizeof(icmp6f)) {
2355                optlen = sizeof(icmp6f);
2356            }
2357
2358            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2359                return -TARGET_EFAULT;
2360            }
2361
2362            for (val = 0; val < 8; val++) {
2363                icmp6f.data[val] = tswap32(icmp6f.data[val]);
2364            }
2365
2366            ret = get_errno(setsockopt(sockfd, level, optname,
2367                                       &icmp6f, optlen));
2368            break;
2369        }
2370        default:
2371            goto unimplemented;
2372        }
2373        break;
2374    case SOL_RAW:
2375        switch (optname) {
2376        case ICMP_FILTER:
2377        case IPV6_CHECKSUM:
2378            /* These take a u32 value. */
2379            if (optlen < sizeof(uint32_t)) {
2380                return -TARGET_EINVAL;
2381            }
2382
2383            if (get_user_u32(val, optval_addr)) {
2384                return -TARGET_EFAULT;
2385            }
2386            ret = get_errno(setsockopt(sockfd, level, optname,
2387                                       &val, sizeof(val)));
2388            break;
2389
2390        default:
2391            goto unimplemented;
2392        }
2393        break;
2394#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2395    case SOL_ALG:
2396        switch (optname) {
2397        case ALG_SET_KEY:
2398        {
2399            char *alg_key = g_malloc(optlen);
2400
2401            if (!alg_key) {
2402                return -TARGET_ENOMEM;
2403            }
2404            if (copy_from_user(alg_key, optval_addr, optlen)) {
2405                g_free(alg_key);
2406                return -TARGET_EFAULT;
2407            }
2408            ret = get_errno(setsockopt(sockfd, level, optname,
2409                                       alg_key, optlen));
2410            g_free(alg_key);
2411            break;
2412        }
2413        case ALG_SET_AEAD_AUTHSIZE:
2414        {
2415            ret = get_errno(setsockopt(sockfd, level, optname,
2416                                       NULL, optlen));
2417            break;
2418        }
2419        default:
2420            goto unimplemented;
2421        }
2422        break;
2423#endif
2424    case TARGET_SOL_SOCKET:
2425        switch (optname) {
2426        case TARGET_SO_RCVTIMEO:
2427        {
2428                struct timeval tv;
2429
2430                optname = SO_RCVTIMEO;
2431
2432set_timeout:
2433                if (optlen != sizeof(struct target_timeval)) {
2434                    return -TARGET_EINVAL;
2435                }
2436
2437                if (copy_from_user_timeval(&tv, optval_addr)) {
2438                    return -TARGET_EFAULT;
2439                }
2440
2441                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2442                                &tv, sizeof(tv)));
2443                return ret;
2444        }
2445        case TARGET_SO_SNDTIMEO:
2446                optname = SO_SNDTIMEO;
2447                goto set_timeout;
2448        case TARGET_SO_ATTACH_FILTER:
2449        {
2450                struct target_sock_fprog *tfprog;
2451                struct target_sock_filter *tfilter;
2452                struct sock_fprog fprog;
2453                struct sock_filter *filter;
2454                int i;
2455
2456                if (optlen != sizeof(*tfprog)) {
2457                    return -TARGET_EINVAL;
2458                }
2459                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2460                    return -TARGET_EFAULT;
2461                }
2462                if (!lock_user_struct(VERIFY_READ, tfilter,
2463                                      tswapal(tfprog->filter), 0)) {
2464                    unlock_user_struct(tfprog, optval_addr, 1);
2465                    return -TARGET_EFAULT;
2466                }
2467
2468                fprog.len = tswap16(tfprog->len);
2469                filter = g_try_new(struct sock_filter, fprog.len);
2470                if (filter == NULL) {
2471                    unlock_user_struct(tfilter, tfprog->filter, 1);
2472                    unlock_user_struct(tfprog, optval_addr, 1);
2473                    return -TARGET_ENOMEM;
2474                }
2475                for (i = 0; i < fprog.len; i++) {
2476                    filter[i].code = tswap16(tfilter[i].code);
2477                    filter[i].jt = tfilter[i].jt;
2478                    filter[i].jf = tfilter[i].jf;
2479                    filter[i].k = tswap32(tfilter[i].k);
2480                }
2481                fprog.filter = filter;
2482
2483                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2484                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2485                g_free(filter);
2486
2487                unlock_user_struct(tfilter, tfprog->filter, 1);
2488                unlock_user_struct(tfprog, optval_addr, 1);
2489                return ret;
2490        }
2491        case TARGET_SO_BINDTODEVICE:
2492        {
2493                char *dev_ifname, *addr_ifname;
2494
2495                if (optlen > IFNAMSIZ - 1) {
2496                    optlen = IFNAMSIZ - 1;
2497                }
2498                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2499                if (!dev_ifname) {
2500                    return -TARGET_EFAULT;
2501                }
2502                optname = SO_BINDTODEVICE;
2503                addr_ifname = alloca(IFNAMSIZ);
2504                memcpy(addr_ifname, dev_ifname, optlen);
2505                addr_ifname[optlen] = 0;
2506                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2507                                           addr_ifname, optlen));
2508                unlock_user(dev_ifname, optval_addr, 0);
2509                return ret;
2510        }
2511        case TARGET_SO_LINGER:
2512        {
2513                struct linger lg;
2514                struct target_linger *tlg;
2515
2516                if (optlen != sizeof(struct target_linger)) {
2517                    return -TARGET_EINVAL;
2518                }
2519                if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2520                    return -TARGET_EFAULT;
2521                }
2522                __get_user(lg.l_onoff, &tlg->l_onoff);
2523                __get_user(lg.l_linger, &tlg->l_linger);
2524                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2525                                &lg, sizeof(lg)));
2526                unlock_user_struct(tlg, optval_addr, 0);
2527                return ret;
2528        }
2529            /* Options with 'int' argument.  */
2530        case TARGET_SO_DEBUG:
2531                optname = SO_DEBUG;
2532                break;
2533        case TARGET_SO_REUSEADDR:
2534                optname = SO_REUSEADDR;
2535                break;
2536#ifdef SO_REUSEPORT
2537        case TARGET_SO_REUSEPORT:
2538                optname = SO_REUSEPORT;
2539                break;
2540#endif
2541        case TARGET_SO_TYPE:
2542                optname = SO_TYPE;
2543                break;
2544        case TARGET_SO_ERROR:
2545                optname = SO_ERROR;
2546                break;
2547        case TARGET_SO_DONTROUTE:
2548                optname = SO_DONTROUTE;
2549                break;
2550        case TARGET_SO_BROADCAST:
2551                optname = SO_BROADCAST;
2552                break;
2553        case TARGET_SO_SNDBUF:
2554                optname = SO_SNDBUF;
2555                break;
2556        case TARGET_SO_SNDBUFFORCE:
2557                optname = SO_SNDBUFFORCE;
2558                break;
2559        case TARGET_SO_RCVBUF:
2560                optname = SO_RCVBUF;
2561                break;
2562        case TARGET_SO_RCVBUFFORCE:
2563                optname = SO_RCVBUFFORCE;
2564                break;
2565        case TARGET_SO_KEEPALIVE:
2566                optname = SO_KEEPALIVE;
2567                break;
2568        case TARGET_SO_OOBINLINE:
2569                optname = SO_OOBINLINE;
2570                break;
2571        case TARGET_SO_NO_CHECK:
2572                optname = SO_NO_CHECK;
2573                break;
2574        case TARGET_SO_PRIORITY:
2575                optname = SO_PRIORITY;
2576                break;
2577#ifdef SO_BSDCOMPAT
2578        case TARGET_SO_BSDCOMPAT:
2579                optname = SO_BSDCOMPAT;
2580                break;
2581#endif
2582        case TARGET_SO_PASSCRED:
2583                optname = SO_PASSCRED;
2584                break;
2585        case TARGET_SO_PASSSEC:
2586                optname = SO_PASSSEC;
2587                break;
2588        case TARGET_SO_TIMESTAMP:
2589                optname = SO_TIMESTAMP;
2590                break;
2591        case TARGET_SO_RCVLOWAT:
2592                optname = SO_RCVLOWAT;
2593                break;
2594        default:
2595            goto unimplemented;
2596        }
2597        if (optlen < sizeof(uint32_t))
2598            return -TARGET_EINVAL;
2599
2600        if (get_user_u32(val, optval_addr))
2601            return -TARGET_EFAULT;
2602        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2603        break;
2604#ifdef SOL_NETLINK
2605    case SOL_NETLINK:
2606        switch (optname) {
2607        case NETLINK_PKTINFO:
2608        case NETLINK_ADD_MEMBERSHIP:
2609        case NETLINK_DROP_MEMBERSHIP:
2610        case NETLINK_BROADCAST_ERROR:
2611        case NETLINK_NO_ENOBUFS:
2612#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2613        case NETLINK_LISTEN_ALL_NSID:
2614        case NETLINK_CAP_ACK:
2615#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2616#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2617        case NETLINK_EXT_ACK:
2618#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2619#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2620        case NETLINK_GET_STRICT_CHK:
2621#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2622            break;
2623        default:
2624            goto unimplemented;
2625        }
2626        val = 0;
2627        if (optlen < sizeof(uint32_t)) {
2628            return -TARGET_EINVAL;
2629        }
2630        if (get_user_u32(val, optval_addr)) {
2631            return -TARGET_EFAULT;
2632        }
2633        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2634                                   sizeof(val)));
2635        break;
2636#endif /* SOL_NETLINK */
2637    default:
2638    unimplemented:
2639        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2640                      level, optname);
2641        ret = -TARGET_ENOPROTOOPT;
2642    }
2643    return ret;
2644}
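
    /*
     * Illustrative note on the level handling above: socket option
     * constants are not numerically identical on every architecture (for
     * example SOL_SOCKET and several SO_* values differ on targets such as
     * MIPS and SPARC), so TARGET_SOL_SOCKET and the TARGET_SO_* names are
     * translated to the host's SOL_SOCKET/SO_* values before calling
     * setsockopt(), while levels such as SOL_TCP and SOL_IP are passed
     * through unchanged.
     */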
2645
2646/* do_getsockopt() must return target values and target errnos. */
2647static abi_long do_getsockopt(int sockfd, int level, int optname,
2648                              abi_ulong optval_addr, abi_ulong optlen)
2649{
2650    abi_long ret;
2651    int len, val;
2652    socklen_t lv;
2653
2654    switch(level) {
2655    case TARGET_SOL_SOCKET:
2656        level = SOL_SOCKET;
2657        switch (optname) {
2658        /* These don't just return a single integer */
2659        case TARGET_SO_PEERNAME:
2660            goto unimplemented;
2661        case TARGET_SO_RCVTIMEO: {
2662            struct timeval tv;
2663            socklen_t tvlen;
2664
2665            optname = SO_RCVTIMEO;
2666
2667get_timeout:
2668            if (get_user_u32(len, optlen)) {
2669                return -TARGET_EFAULT;
2670            }
2671            if (len < 0) {
2672                return -TARGET_EINVAL;
2673            }
2674
2675            tvlen = sizeof(tv);
2676            ret = get_errno(getsockopt(sockfd, level, optname,
2677                                       &tv, &tvlen));
2678            if (ret < 0) {
2679                return ret;
2680            }
2681            if (len > sizeof(struct target_timeval)) {
2682                len = sizeof(struct target_timeval);
2683            }
2684            if (copy_to_user_timeval(optval_addr, &tv)) {
2685                return -TARGET_EFAULT;
2686            }
2687            if (put_user_u32(len, optlen)) {
2688                return -TARGET_EFAULT;
2689            }
2690            break;
2691        }
2692        case TARGET_SO_SNDTIMEO:
2693            optname = SO_SNDTIMEO;
2694            goto get_timeout;
2695        case TARGET_SO_PEERCRED: {
2696            struct ucred cr;
2697            socklen_t crlen;
2698            struct target_ucred *tcr;
2699
2700            if (get_user_u32(len, optlen)) {
2701                return -TARGET_EFAULT;
2702            }
2703            if (len < 0) {
2704                return -TARGET_EINVAL;
2705            }
2706
2707            crlen = sizeof(cr);
2708            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2709                                       &cr, &crlen));
2710            if (ret < 0) {
2711                return ret;
2712            }
2713            if (len > crlen) {
2714                len = crlen;
2715            }
2716            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2717                return -TARGET_EFAULT;
2718            }
2719            __put_user(cr.pid, &tcr->pid);
2720            __put_user(cr.uid, &tcr->uid);
2721            __put_user(cr.gid, &tcr->gid);
2722            unlock_user_struct(tcr, optval_addr, 1);
2723            if (put_user_u32(len, optlen)) {
2724                return -TARGET_EFAULT;
2725            }
2726            break;
2727        }
2728        case TARGET_SO_PEERSEC: {
2729            char *name;
2730
2731            if (get_user_u32(len, optlen)) {
2732                return -TARGET_EFAULT;
2733            }
2734            if (len < 0) {
2735                return -TARGET_EINVAL;
2736            }
2737            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2738            if (!name) {
2739                return -TARGET_EFAULT;
2740            }
2741            lv = len;
2742            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2743                                       name, &lv));
2744            if (put_user_u32(lv, optlen)) {
2745                ret = -TARGET_EFAULT;
2746            }
2747            unlock_user(name, optval_addr, lv);
2748            break;
2749        }
2750        case TARGET_SO_LINGER:
2751        {
2752            struct linger lg;
2753            socklen_t lglen;
2754            struct target_linger *tlg;
2755
2756            if (get_user_u32(len, optlen)) {
2757                return -TARGET_EFAULT;
2758            }
2759            if (len < 0) {
2760                return -TARGET_EINVAL;
2761            }
2762
2763            lglen = sizeof(lg);
2764            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2765                                       &lg, &lglen));
2766            if (ret < 0) {
2767                return ret;
2768            }
2769            if (len > lglen) {
2770                len = lglen;
2771            }
2772            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2773                return -TARGET_EFAULT;
2774            }
2775            __put_user(lg.l_onoff, &tlg->l_onoff);
2776            __put_user(lg.l_linger, &tlg->l_linger);
2777            unlock_user_struct(tlg, optval_addr, 1);
2778            if (put_user_u32(len, optlen)) {
2779                return -TARGET_EFAULT;
2780            }
2781            break;
2782        }
2783        /* Options with 'int' argument.  */
2784        case TARGET_SO_DEBUG:
2785            optname = SO_DEBUG;
2786            goto int_case;
2787        case TARGET_SO_REUSEADDR:
2788            optname = SO_REUSEADDR;
2789            goto int_case;
2790#ifdef SO_REUSEPORT
2791        case TARGET_SO_REUSEPORT:
2792            optname = SO_REUSEPORT;
2793            goto int_case;
2794#endif
2795        case TARGET_SO_TYPE:
2796            optname = SO_TYPE;
2797            goto int_case;
2798        case TARGET_SO_ERROR:
2799            optname = SO_ERROR;
2800            goto int_case;
2801        case TARGET_SO_DONTROUTE:
2802            optname = SO_DONTROUTE;
2803            goto int_case;
2804        case TARGET_SO_BROADCAST:
2805            optname = SO_BROADCAST;
2806            goto int_case;
2807        case TARGET_SO_SNDBUF:
2808            optname = SO_SNDBUF;
2809            goto int_case;
2810        case TARGET_SO_RCVBUF:
2811            optname = SO_RCVBUF;
2812            goto int_case;
2813        case TARGET_SO_KEEPALIVE:
2814            optname = SO_KEEPALIVE;
2815            goto int_case;
2816        case TARGET_SO_OOBINLINE:
2817            optname = SO_OOBINLINE;
2818            goto int_case;
2819        case TARGET_SO_NO_CHECK:
2820            optname = SO_NO_CHECK;
2821            goto int_case;
2822        case TARGET_SO_PRIORITY:
2823            optname = SO_PRIORITY;
2824            goto int_case;
2825#ifdef SO_BSDCOMPAT
2826        case TARGET_SO_BSDCOMPAT:
2827            optname = SO_BSDCOMPAT;
2828            goto int_case;
2829#endif
2830        case TARGET_SO_PASSCRED:
2831            optname = SO_PASSCRED;
2832            goto int_case;
2833        case TARGET_SO_TIMESTAMP:
2834            optname = SO_TIMESTAMP;
2835            goto int_case;
2836        case TARGET_SO_RCVLOWAT:
2837            optname = SO_RCVLOWAT;
2838            goto int_case;
2839        case TARGET_SO_ACCEPTCONN:
2840            optname = SO_ACCEPTCONN;
2841            goto int_case;
2842        case TARGET_SO_PROTOCOL:
2843            optname = SO_PROTOCOL;
2844            goto int_case;
2845        case TARGET_SO_DOMAIN:
2846            optname = SO_DOMAIN;
2847            goto int_case;
2848        default:
2849            goto int_case;
2850        }
2851        break;
2852    case SOL_TCP:
2853    case SOL_UDP:
2854        /* TCP and UDP options all take an 'int' value.  */
2855    int_case:
2856        if (get_user_u32(len, optlen))
2857            return -TARGET_EFAULT;
2858        if (len < 0)
2859            return -TARGET_EINVAL;
2860        lv = sizeof(lv);
2861        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2862        if (ret < 0)
2863            return ret;
2864        if (optname == SO_TYPE) {
2865            val = host_to_target_sock_type(val);
2866        }
2867        if (len > lv)
2868            len = lv;
2869        if (len == 4) {
2870            if (put_user_u32(val, optval_addr))
2871                return -TARGET_EFAULT;
2872        } else {
2873            if (put_user_u8(val, optval_addr))
2874                return -TARGET_EFAULT;
2875        }
2876        if (put_user_u32(len, optlen))
2877            return -TARGET_EFAULT;
2878        break;
2879    case SOL_IP:
2880        switch(optname) {
2881        case IP_TOS:
2882        case IP_TTL:
2883        case IP_HDRINCL:
2884        case IP_ROUTER_ALERT:
2885        case IP_RECVOPTS:
2886        case IP_RETOPTS:
2887        case IP_PKTINFO:
2888        case IP_MTU_DISCOVER:
2889        case IP_RECVERR:
2890        case IP_RECVTOS:
2891#ifdef IP_FREEBIND
2892        case IP_FREEBIND:
2893#endif
2894        case IP_MULTICAST_TTL:
2895        case IP_MULTICAST_LOOP:
2896            if (get_user_u32(len, optlen))
2897                return -TARGET_EFAULT;
2898            if (len < 0)
2899                return -TARGET_EINVAL;
2900            lv = sizeof(lv);
2901            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2902            if (ret < 0)
2903                return ret;
2904            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2905                len = 1;
2906                if (put_user_u32(len, optlen)
2907                    || put_user_u8(val, optval_addr))
2908                    return -TARGET_EFAULT;
2909            } else {
2910                if (len > sizeof(int))
2911                    len = sizeof(int);
2912                if (put_user_u32(len, optlen)
2913                    || put_user_u32(val, optval_addr))
2914                    return -TARGET_EFAULT;
2915            }
2916            break;
2917        default:
2918            ret = -TARGET_ENOPROTOOPT;
2919            break;
2920        }
2921        break;
2922    case SOL_IPV6:
2923        switch (optname) {
2924        case IPV6_MTU_DISCOVER:
2925        case IPV6_MTU:
2926        case IPV6_V6ONLY:
2927        case IPV6_RECVPKTINFO:
2928        case IPV6_UNICAST_HOPS:
2929        case IPV6_MULTICAST_HOPS:
2930        case IPV6_MULTICAST_LOOP:
2931        case IPV6_RECVERR:
2932        case IPV6_RECVHOPLIMIT:
2933        case IPV6_2292HOPLIMIT:
2934        case IPV6_CHECKSUM:
2935        case IPV6_ADDRFORM:
2936        case IPV6_2292PKTINFO:
2937        case IPV6_RECVTCLASS:
2938        case IPV6_RECVRTHDR:
2939        case IPV6_2292RTHDR:
2940        case IPV6_RECVHOPOPTS:
2941        case IPV6_2292HOPOPTS:
2942        case IPV6_RECVDSTOPTS:
2943        case IPV6_2292DSTOPTS:
2944        case IPV6_TCLASS:
2945        case IPV6_ADDR_PREFERENCES:
2946#ifdef IPV6_RECVPATHMTU
2947        case IPV6_RECVPATHMTU:
2948#endif
2949#ifdef IPV6_TRANSPARENT
2950        case IPV6_TRANSPARENT:
2951#endif
2952#ifdef IPV6_FREEBIND
2953        case IPV6_FREEBIND:
2954#endif
2955#ifdef IPV6_RECVORIGDSTADDR
2956        case IPV6_RECVORIGDSTADDR:
2957#endif
2958            if (get_user_u32(len, optlen))
2959                return -TARGET_EFAULT;
2960            if (len < 0)
2961                return -TARGET_EINVAL;
2962            lv = sizeof(lv);
2963            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2964            if (ret < 0)
2965                return ret;
2966            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2967                len = 1;
2968                if (put_user_u32(len, optlen)
2969                    || put_user_u8(val, optval_addr))
2970                    return -TARGET_EFAULT;
2971            } else {
2972                if (len > sizeof(int))
2973                    len = sizeof(int);
2974                if (put_user_u32(len, optlen)
2975                    || put_user_u32(val, optval_addr))
2976                    return -TARGET_EFAULT;
2977            }
2978            break;
2979        default:
2980            ret = -TARGET_ENOPROTOOPT;
2981            break;
2982        }
2983        break;
2984#ifdef SOL_NETLINK
2985    case SOL_NETLINK:
2986        switch (optname) {
2987        case NETLINK_PKTINFO:
2988        case NETLINK_BROADCAST_ERROR:
2989        case NETLINK_NO_ENOBUFS:
2990#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2991        case NETLINK_LISTEN_ALL_NSID:
2992        case NETLINK_CAP_ACK:
2993#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2994#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2995        case NETLINK_EXT_ACK:
2996#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2997#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2998        case NETLINK_GET_STRICT_CHK:
2999#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
3000            if (get_user_u32(len, optlen)) {
3001                return -TARGET_EFAULT;
3002            }
3003            if (len != sizeof(val)) {
3004                return -TARGET_EINVAL;
3005            }
3006            lv = len;
3007            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3008            if (ret < 0) {
3009                return ret;
3010            }
3011            if (put_user_u32(lv, optlen)
3012                || put_user_u32(val, optval_addr)) {
3013                return -TARGET_EFAULT;
3014            }
3015            break;
3016#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3017        case NETLINK_LIST_MEMBERSHIPS:
3018        {
3019            uint32_t *results;
3020            int i;
3021            if (get_user_u32(len, optlen)) {
3022                return -TARGET_EFAULT;
3023            }
3024            if (len < 0) {
3025                return -TARGET_EINVAL;
3026            }
3027            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3028            if (!results && len > 0) {
3029                return -TARGET_EFAULT;
3030            }
3031            lv = len;
3032            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3033            if (ret < 0) {
3034                unlock_user(results, optval_addr, 0);
3035                return ret;
3036            }
3037            /* Swap host endianness to target endianness. */
3038            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3039                results[i] = tswap32(results[i]);
3040            }
3041            unlock_user(results, optval_addr, 0);
3042            if (put_user_u32(lv, optlen)) {
3043                return -TARGET_EFAULT;
3044            }
3045            break;
3046        }
3047#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3048        default:
3049            goto unimplemented;
3050        }
3051        break;
3052#endif /* SOL_NETLINK */
3053    default:
3054    unimplemented:
3055        qemu_log_mask(LOG_UNIMP,
3056                      "getsockopt level=%d optname=%d not yet supported\n",
3057                      level, optname);
3058        ret = -TARGET_EOPNOTSUPP;
3059        break;
3060    }
3061    return ret;
3062}
3063
3064/* Convert target low/high pair representing file offset into the host
3065 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3066 * as the kernel doesn't handle them either.
3067 */
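/*
 * For example, with a 32-bit target on a 64-bit host, tlow = 0x00001000 and
 * thigh = 0x2 combine into off = 0x200001000, so *hlow = 0x200001000 and
 * *hhigh = 0; on a 32-bit host the same offset splits into
 * *hlow = 0x00001000 and *hhigh = 0x2.
 */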
3068static void target_to_host_low_high(abi_ulong tlow,
3069                                    abi_ulong thigh,
3070                                    unsigned long *hlow,
3071                                    unsigned long *hhigh)
3072{
3073    uint64_t off = tlow |
3074        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3075        TARGET_LONG_BITS / 2;
3076
3077    *hlow = off;
3078    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3079}
3080
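/* Pull a guest iovec array at target_addr into a newly allocated host
 * struct iovec array, locking each referenced buffer with lock_user().
 * Returns NULL with errno set on failure; a zero count also returns NULL,
 * but with errno cleared.  The caller releases the result with
 * unlock_iovec().
 */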
3081static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3082                                abi_ulong count, int copy)
3083{
3084    struct target_iovec *target_vec;
3085    struct iovec *vec;
3086    abi_ulong total_len, max_len;
3087    int i;
3088    int err = 0;
3089    bool bad_address = false;
3090
3091    if (count == 0) {
3092        errno = 0;
3093        return NULL;
3094    }
3095    if (count > IOV_MAX) {
3096        errno = EINVAL;
3097        return NULL;
3098    }
3099
3100    vec = g_try_new0(struct iovec, count);
3101    if (vec == NULL) {
3102        errno = ENOMEM;
3103        return NULL;
3104    }
3105
3106    target_vec = lock_user(VERIFY_READ, target_addr,
3107                           count * sizeof(struct target_iovec), 1);
3108    if (target_vec == NULL) {
3109        err = EFAULT;
3110        goto fail2;
3111    }
3112
3113    /* ??? If host page size > target page size, this will result in a
3114       value larger than what we can actually support.  */
3115    max_len = 0x7fffffff & TARGET_PAGE_MASK;
3116    total_len = 0;
3117
3118    for (i = 0; i < count; i++) {
3119        abi_ulong base = tswapal(target_vec[i].iov_base);
3120        abi_long len = tswapal(target_vec[i].iov_len);
3121
3122        if (len < 0) {
3123            err = EINVAL;
3124            goto fail;
3125        } else if (len == 0) {
3126            /* Zero length pointer is ignored.  */
3127            vec[i].iov_base = 0;
3128        } else {
3129            vec[i].iov_base = lock_user(type, base, len, copy);
3130            /* If the first buffer pointer is bad, this is a fault.  But
3131             * subsequent bad buffers will result in a partial write; this
3132             * is realized by filling the vector with null pointers and
3133             * zero lengths. */
3134            if (!vec[i].iov_base) {
3135                if (i == 0) {
3136                    err = EFAULT;
3137                    goto fail;
3138                } else {
3139                    bad_address = true;
3140                }
3141            }
3142            if (bad_address) {
3143                len = 0;
3144            }
3145            if (len > max_len - total_len) {
3146                len = max_len - total_len;
3147            }
3148        }
3149        vec[i].iov_len = len;
3150        total_len += len;
3151    }
3152
3153    unlock_user(target_vec, target_addr, 0);
3154    return vec;
3155
3156 fail:
3157    while (--i >= 0) {
3158        if (tswapal(target_vec[i].iov_len) > 0) {
3159            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3160        }
3161    }
3162    unlock_user(target_vec, target_addr, 0);
3163 fail2:
3164    g_free(vec);
3165    errno = err;
3166    return NULL;
3167}
3168
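/* Release an iovec set up by lock_iovec().  When copy is non-zero the locked
 * buffers are copied back to the guest; the host vector is freed either way.
 */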
3169static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3170                         abi_ulong count, int copy)
3171{
3172    struct target_iovec *target_vec;
3173    int i;
3174
3175    target_vec = lock_user(VERIFY_READ, target_addr,
3176                           count * sizeof(struct target_iovec), 1);
3177    if (target_vec) {
3178        for (i = 0; i < count; i++) {
3179            abi_ulong base = tswapal(target_vec[i].iov_base);
3180            abi_long len = tswapal(target_vec[i].iov_len);
3181            if (len < 0) {
3182                break;
3183            }
3184            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3185        }
3186        unlock_user(target_vec, target_addr, 0);
3187    }
3188
3189    g_free(vec);
3190}
3191
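/* Translate a target SOCK_* type plus type flags (SOCK_CLOEXEC,
 * SOCK_NONBLOCK) into the host encoding.  Returns 0 on success, or
 * -TARGET_EINVAL if a requested flag cannot be represented on this host.
 */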
3192static inline int target_to_host_sock_type(int *type)
3193{
3194    int host_type = 0;
3195    int target_type = *type;
3196
3197    switch (target_type & TARGET_SOCK_TYPE_MASK) {
3198    case TARGET_SOCK_DGRAM:
3199        host_type = SOCK_DGRAM;
3200        break;
3201    case TARGET_SOCK_STREAM:
3202        host_type = SOCK_STREAM;
3203        break;
3204    default:
3205        host_type = target_type & TARGET_SOCK_TYPE_MASK;
3206        break;
3207    }
3208    if (target_type & TARGET_SOCK_CLOEXEC) {
3209#if defined(SOCK_CLOEXEC)
3210        host_type |= SOCK_CLOEXEC;
3211#else
3212        return -TARGET_EINVAL;
3213#endif
3214    }
3215    if (target_type & TARGET_SOCK_NONBLOCK) {
3216#if defined(SOCK_NONBLOCK)
3217        host_type |= SOCK_NONBLOCK;
3218#elif !defined(O_NONBLOCK)
3219        return -TARGET_EINVAL;
3220#endif
3221    }
3222    *type = host_type;
3223    return 0;
3224}
3225
3226/* Try to emulate socket type flags after socket creation.  */
3227static int sock_flags_fixup(int fd, int target_type)
3228{
3229#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3230    if (target_type & TARGET_SOCK_NONBLOCK) {
3231        int flags = fcntl(fd, F_GETFL);
3232        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3233            close(fd);
3234            return -TARGET_EINVAL;
3235        }
3236    }
3237#endif
3238    return fd;
3239}
3240
3241/* do_socket() Must return target values and target errnos. */
3242static abi_long do_socket(int domain, int type, int protocol)
3243{
3244    int target_type = type;
3245    int ret;
3246
3247    ret = target_to_host_sock_type(&type);
3248    if (ret) {
3249        return ret;
3250    }
3251
3252    if (domain == PF_NETLINK && !(
3253#ifdef CONFIG_RTNETLINK
3254         protocol == NETLINK_ROUTE ||
3255#endif
3256         protocol == NETLINK_KOBJECT_UEVENT ||
3257         protocol == NETLINK_AUDIT)) {
3258        return -TARGET_EPROTONOSUPPORT;
3259    }
3260
3261    if (domain == AF_PACKET ||
3262        (domain == AF_INET && type == SOCK_PACKET)) {
3263        protocol = tswap16(protocol);
3264    }
3265
3266    ret = get_errno(socket(domain, type, protocol));
3267    if (ret >= 0) {
3268        ret = sock_flags_fixup(ret, target_type);
3269        if (type == SOCK_PACKET) {
3270            /* Handle an obsolete case:
3271             * if the socket type is SOCK_PACKET, bind by name.
3272             */
3273            fd_trans_register(ret, &target_packet_trans);
3274        } else if (domain == PF_NETLINK) {
3275            switch (protocol) {
3276#ifdef CONFIG_RTNETLINK
3277            case NETLINK_ROUTE:
3278                fd_trans_register(ret, &target_netlink_route_trans);
3279                break;
3280#endif
3281            case NETLINK_KOBJECT_UEVENT:
3282                /* nothing to do: messages are strings */
3283                break;
3284            case NETLINK_AUDIT:
3285                fd_trans_register(ret, &target_netlink_audit_trans);
3286                break;
3287            default:
3288                g_assert_not_reached();
3289            }
3290        }
3291    }
3292    return ret;
3293}
3294
3295/* do_bind() Must return target values and target errnos. */
3296static abi_long do_bind(int sockfd, abi_ulong target_addr,
3297                        socklen_t addrlen)
3298{
3299    void *addr;
3300    abi_long ret;
3301
3302    if ((int)addrlen < 0) {
3303        return -TARGET_EINVAL;
3304    }
3305
3306    addr = alloca(addrlen+1);
3307
3308    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3309    if (ret)
3310        return ret;
3311
3312    return get_errno(bind(sockfd, addr, addrlen));
3313}
3314
3315/* do_connect() Must return target values and target errnos. */
3316static abi_long do_connect(int sockfd, abi_ulong target_addr,
3317                           socklen_t addrlen)
3318{
3319    void *addr;
3320    abi_long ret;
3321
3322    if ((int)addrlen < 0) {
3323        return -TARGET_EINVAL;
3324    }
3325
3326    addr = alloca(addrlen+1);
3327
3328    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3329    if (ret)
3330        return ret;
3331
3332    return get_errno(safe_connect(sockfd, addr, addrlen));
3333}
3334
3335/* do_sendrecvmsg_locked() Must return target values and target errnos. */
3336static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3337                                      int flags, int send)
3338{
3339    abi_long ret, len;
3340    struct msghdr msg;
3341    abi_ulong count;
3342    struct iovec *vec;
3343    abi_ulong target_vec;
3344
3345    if (msgp->msg_name) {
3346        msg.msg_namelen = tswap32(msgp->msg_namelen);
3347        msg.msg_name = alloca(msg.msg_namelen+1);
3348        ret = target_to_host_sockaddr(fd, msg.msg_name,
3349                                      tswapal(msgp->msg_name),
3350                                      msg.msg_namelen);
3351        if (ret == -TARGET_EFAULT) {
3352            /* For connected sockets msg_name and msg_namelen must
3353             * be ignored, so returning EFAULT immediately is wrong.
3354             * Instead, pass a bad msg_name to the host kernel, and
3355             * let it decide whether to return EFAULT or not.
3356             */
3357            msg.msg_name = (void *)-1;
3358        } else if (ret) {
3359            goto out2;
3360        }
3361    } else {
3362        msg.msg_name = NULL;
3363        msg.msg_namelen = 0;
3364    }
3365    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3366    msg.msg_control = alloca(msg.msg_controllen);
3367    memset(msg.msg_control, 0, msg.msg_controllen);
3368
3369    msg.msg_flags = tswap32(msgp->msg_flags);
3370
3371    count = tswapal(msgp->msg_iovlen);
3372    target_vec = tswapal(msgp->msg_iov);
3373
3374    if (count > IOV_MAX) {
3375        /* sendmsg/recvmsg return a different errno for this condition than
3376         * readv/writev, so we must catch it here before lock_iovec() does.
3377         */
3378        ret = -TARGET_EMSGSIZE;
3379        goto out2;
3380    }
3381
3382    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3383                     target_vec, count, send);
3384    if (vec == NULL) {
3385        ret = -host_to_target_errno(errno);
3386        goto out2;
3387    }
3388    msg.msg_iovlen = count;
3389    msg.msg_iov = vec;
3390
3391    if (send) {
3392        if (fd_trans_target_to_host_data(fd)) {
3393            void *host_msg;
3394
3395            host_msg = g_malloc(msg.msg_iov->iov_len);
3396            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3397            ret = fd_trans_target_to_host_data(fd)(host_msg,
3398                                                   msg.msg_iov->iov_len);
3399            if (ret >= 0) {
3400                msg.msg_iov->iov_base = host_msg;
3401                ret = get_errno(safe_sendmsg(fd, &msg, flags));
3402            }
3403            g_free(host_msg);
3404        } else {
3405            ret = target_to_host_cmsg(&msg, msgp);
3406            if (ret == 0) {
3407                ret = get_errno(safe_sendmsg(fd, &msg, flags));
3408            }
3409        }
3410    } else {
3411        ret = get_errno(safe_recvmsg(fd, &msg, flags));
3412        if (!is_error(ret)) {
3413            len = ret;
3414            if (fd_trans_host_to_target_data(fd)) {
3415                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3416                                               MIN(msg.msg_iov->iov_len, len));
3417            } else {
3418                ret = host_to_target_cmsg(msgp, &msg);
3419            }
3420            if (!is_error(ret)) {
3421                msgp->msg_namelen = tswap32(msg.msg_namelen);
3422                msgp->msg_flags = tswap32(msg.msg_flags);
3423                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3424                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3425                                    msg.msg_name, msg.msg_namelen);
3426                    if (ret) {
3427                        goto out;
3428                    }
3429                }
3430
3431                ret = len;
3432            }
3433        }
3434    }
3435
3436out:
3437    unlock_iovec(vec, target_vec, count, !send);
3438out2:
3439    return ret;
3440}
3441
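/* Lock the guest msghdr and hand it to do_sendrecvmsg_locked(); this is the
 * common entry point for the sendmsg/recvmsg paths, including
 * do_socketcall().
 */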
3442static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3443                               int flags, int send)
3444{
3445    abi_long ret;
3446    struct target_msghdr *msgp;
3447
3448    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3449                          msgp,
3450                          target_msg,
3451                          send ? 1 : 0)) {
3452        return -TARGET_EFAULT;
3453    }
3454    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3455    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3456    return ret;
3457}
3458
3459/* We don't rely on the C library to have sendmmsg/recvmmsg support,
3460 * so it might not have this *mmsg-specific flag either.
3461 */
3462#ifndef MSG_WAITFORONE
3463#define MSG_WAITFORONE 0x10000
3464#endif
3465
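/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked() for each
 * entry of the target mmsghdr vector.  vlen is clamped to UIO_MAXIOV and the
 * per-message byte count is written back into each msg_len field.
 */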
3466static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3467                                unsigned int vlen, unsigned int flags,
3468                                int send)
3469{
3470    struct target_mmsghdr *mmsgp;
3471    abi_long ret = 0;
3472    int i;
3473
3474    if (vlen > UIO_MAXIOV) {
3475        vlen = UIO_MAXIOV;
3476    }
3477
3478    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3479    if (!mmsgp) {
3480        return -TARGET_EFAULT;
3481    }
3482
3483    for (i = 0; i < vlen; i++) {
3484        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3485        if (is_error(ret)) {
3486            break;
3487        }
3488        mmsgp[i].msg_len = tswap32(ret);
3489        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3490        if (flags & MSG_WAITFORONE) {
3491            flags |= MSG_DONTWAIT;
3492        }
3493    }
3494
3495    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3496
3497    /* Return the number of datagrams sent or received if we handled any
3498     * at all; otherwise return the error.
3499     */
3500    if (i) {
3501        return i;
3502    }
3503    return ret;
3504}
3505
3506/* do_accept4() Must return target values and target errnos. */
3507static abi_long do_accept4(int fd, abi_ulong target_addr,
3508                           abi_ulong target_addrlen_addr, int flags)
3509{
3510    socklen_t addrlen, ret_addrlen;
3511    void *addr;
3512    abi_long ret;
3513    int host_flags;
3514
3515    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3516
3517    if (target_addr == 0) {
3518        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3519    }
3520
3521    /* Linux returns EFAULT if the addrlen pointer is invalid */
3522    if (get_user_u32(addrlen, target_addrlen_addr))
3523        return -TARGET_EFAULT;
3524
3525    if ((int)addrlen < 0) {
3526        return -TARGET_EINVAL;
3527    }
3528
3529    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3530        return -TARGET_EFAULT;
3531    }
3532
3533    addr = alloca(addrlen);
3534
3535    ret_addrlen = addrlen;
3536    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3537    if (!is_error(ret)) {
3538        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3539        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3540            ret = -TARGET_EFAULT;
3541        }
3542    }
3543    return ret;
3544}
3545
3546/* do_getpeername() Must return target values and target errnos. */
3547static abi_long do_getpeername(int fd, abi_ulong target_addr,
3548                               abi_ulong target_addrlen_addr)
3549{
3550    socklen_t addrlen, ret_addrlen;
3551    void *addr;
3552    abi_long ret;
3553
3554    if (get_user_u32(addrlen, target_addrlen_addr))
3555        return -TARGET_EFAULT;
3556
3557    if ((int)addrlen < 0) {
3558        return -TARGET_EINVAL;
3559    }
3560
3561    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3562        return -TARGET_EFAULT;
3563    }
3564
3565    addr = alloca(addrlen);
3566
3567    ret_addrlen = addrlen;
3568    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3569    if (!is_error(ret)) {
3570        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3571        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3572            ret = -TARGET_EFAULT;
3573        }
3574    }
3575    return ret;
3576}
3577
3578/* do_getsockname() Must return target values and target errnos. */
3579static abi_long do_getsockname(int fd, abi_ulong target_addr,
3580                               abi_ulong target_addrlen_addr)
3581{
3582    socklen_t addrlen, ret_addrlen;
3583    void *addr;
3584    abi_long ret;
3585
3586    if (get_user_u32(addrlen, target_addrlen_addr))
3587        return -TARGET_EFAULT;
3588
3589    if ((int)addrlen < 0) {
3590        return -TARGET_EINVAL;
3591    }
3592
3593    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3594        return -TARGET_EFAULT;
3595    }
3596
3597    addr = alloca(addrlen);
3598
3599    ret_addrlen = addrlen;
3600    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3601    if (!is_error(ret)) {
3602        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3603        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3604            ret = -TARGET_EFAULT;
3605        }
3606    }
3607    return ret;
3608}
3609
3610/* do_socketpair() Must return target values and target errnos. */
3611static abi_long do_socketpair(int domain, int type, int protocol,
3612                              abi_ulong target_tab_addr)
3613{
3614    int tab[2];
3615    abi_long ret;
3616
3617    target_to_host_sock_type(&type);
3618
3619    ret = get_errno(socketpair(domain, type, protocol, tab));
3620    if (!is_error(ret)) {
3621        if (put_user_s32(tab[0], target_tab_addr)
3622            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3623            ret = -TARGET_EFAULT;
3624    }
3625    return ret;
3626}
3627
3628/* do_sendto() Must return target values and target errnos. */
3629static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3630                          abi_ulong target_addr, socklen_t addrlen)
3631{
3632    void *addr;
3633    void *host_msg;
3634    void *copy_msg = NULL;
3635    abi_long ret;
3636
3637    if ((int)addrlen < 0) {
3638        return -TARGET_EINVAL;
3639    }
3640
3641    host_msg = lock_user(VERIFY_READ, msg, len, 1);
3642    if (!host_msg)
3643        return -TARGET_EFAULT;
3644    if (fd_trans_target_to_host_data(fd)) {
3645        copy_msg = host_msg;
3646        host_msg = g_malloc(len);
3647        memcpy(host_msg, copy_msg, len);
3648        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3649        if (ret < 0) {
3650            goto fail;
3651        }
3652    }
3653    if (target_addr) {
3654        addr = alloca(addrlen+1);
3655        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3656        if (ret) {
3657            goto fail;
3658        }
3659        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3660    } else {
3661        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3662    }
3663fail:
3664    if (copy_msg) {
3665        g_free(host_msg);
3666        host_msg = copy_msg;
3667    }
3668    unlock_user(host_msg, msg, 0);
3669    return ret;
3670}
3671
3672/* do_recvfrom() Must return target values and target errnos. */
3673static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3674                            abi_ulong target_addr,
3675                            abi_ulong target_addrlen)
3676{
3677    socklen_t addrlen, ret_addrlen;
3678    void *addr;
3679    void *host_msg;
3680    abi_long ret;
3681
3682    if (!msg) {
3683        host_msg = NULL;
3684    } else {
3685        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3686        if (!host_msg) {
3687            return -TARGET_EFAULT;
3688        }
3689    }
3690    if (target_addr) {
3691        if (get_user_u32(addrlen, target_addrlen)) {
3692            ret = -TARGET_EFAULT;
3693            goto fail;
3694        }
3695        if ((int)addrlen < 0) {
3696            ret = -TARGET_EINVAL;
3697            goto fail;
3698        }
3699        addr = alloca(addrlen);
3700        ret_addrlen = addrlen;
3701        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3702                                      addr, &ret_addrlen));
3703    } else {
3704        addr = NULL; /* To keep compiler quiet.  */
3705        addrlen = 0; /* To keep compiler quiet.  */
3706        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3707    }
3708    if (!is_error(ret)) {
3709        if (fd_trans_host_to_target_data(fd)) {
3710            abi_long trans;
3711            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3712            if (is_error(trans)) {
3713                ret = trans;
3714                goto fail;
3715            }
3716        }
3717        if (target_addr) {
3718            host_to_target_sockaddr(target_addr, addr,
3719                                    MIN(addrlen, ret_addrlen));
3720            if (put_user_u32(ret_addrlen, target_addrlen)) {
3721                ret = -TARGET_EFAULT;
3722                goto fail;
3723            }
3724        }
3725        unlock_user(host_msg, msg, len);
3726    } else {
3727fail:
3728        unlock_user(host_msg, msg, 0);
3729    }
3730    return ret;
3731}
3732
3733#ifdef TARGET_NR_socketcall
3734/* do_socketcall() must return target values and target errnos. */
3735static abi_long do_socketcall(int num, abi_ulong vptr)
3736{
3737    static const unsigned nargs[] = { /* number of arguments per operation */
3738        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3739        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3740        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3741        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3742        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3743        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3744        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3745        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3746        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3747        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3748        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3749        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3750        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3751        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3752        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3753        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3754        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3755        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3756        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3757        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3758    };
3759    abi_long a[6]; /* max 6 args */
3760    unsigned i;
3761
3762    /* check the range of the first argument num */
3763    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3764    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3765        return -TARGET_EINVAL;
3766    }
3767    /* ensure we have space for args */
3768    if (nargs[num] > ARRAY_SIZE(a)) {
3769        return -TARGET_EINVAL;
3770    }
3771    /* collect the arguments in a[] according to nargs[] */
3772    for (i = 0; i < nargs[num]; ++i) {
3773        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3774            return -TARGET_EFAULT;
3775        }
3776    }
3777    /* now that we have the args, invoke the appropriate underlying function */
3778    switch (num) {
3779    case TARGET_SYS_SOCKET: /* domain, type, protocol */
3780        return do_socket(a[0], a[1], a[2]);
3781    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3782        return do_bind(a[0], a[1], a[2]);
3783    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3784        return do_connect(a[0], a[1], a[2]);
3785    case TARGET_SYS_LISTEN: /* sockfd, backlog */
3786        return get_errno(listen(a[0], a[1]));
3787    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3788        return do_accept4(a[0], a[1], a[2], 0);
3789    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3790        return do_getsockname(a[0], a[1], a[2]);
3791    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3792        return do_getpeername(a[0], a[1], a[2]);
3793    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3794        return do_socketpair(a[0], a[1], a[2], a[3]);
3795    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3796        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3797    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3798        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3799    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3800        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3801    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3802        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3803    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3804        return get_errno(shutdown(a[0], a[1]));
3805    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3806        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3807    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3808        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3809    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3810        return do_sendrecvmsg(a[0], a[1], a[2], 1);
3811    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3812        return do_sendrecvmsg(a[0], a[1], a[2], 0);
3813    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3814        return do_accept4(a[0], a[1], a[2], a[3]);
3815    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3816        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3817    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3818        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3819    default:
3820        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3821        return -TARGET_EINVAL;
3822    }
3823}
3824#endif
3825
3826#define N_SHM_REGIONS   32
3827
3828static struct shm_region {
3829    abi_ulong start;
3830    abi_ulong size;
3831    bool in_use;
3832} shm_regions[N_SHM_REGIONS];
3833
3834#ifndef TARGET_SEMID64_DS
3835/* asm-generic version of this struct */
3836struct target_semid64_ds
3837{
3838  struct target_ipc_perm sem_perm;
3839  abi_ulong sem_otime;
3840#if TARGET_ABI_BITS == 32
3841  abi_ulong __unused1;
3842#endif
3843  abi_ulong sem_ctime;
3844#if TARGET_ABI_BITS == 32
3845  abi_ulong __unused2;
3846#endif
3847  abi_ulong sem_nsems;
3848  abi_ulong __unused3;
3849  abi_ulong __unused4;
3850};
3851#endif
3852
3853static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3854                                               abi_ulong target_addr)
3855{
3856    struct target_ipc_perm *target_ip;
3857    struct target_semid64_ds *target_sd;
3858
3859    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3860        return -TARGET_EFAULT;
3861    target_ip = &(target_sd->sem_perm);
3862    host_ip->__key = tswap32(target_ip->__key);
3863    host_ip->uid = tswap32(target_ip->uid);
3864    host_ip->gid = tswap32(target_ip->gid);
3865    host_ip->cuid = tswap32(target_ip->cuid);
3866    host_ip->cgid = tswap32(target_ip->cgid);
3867#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3868    host_ip->mode = tswap32(target_ip->mode);
3869#else
3870    host_ip->mode = tswap16(target_ip->mode);
3871#endif
3872#if defined(TARGET_PPC)
3873    host_ip->__seq = tswap32(target_ip->__seq);
3874#else
3875    host_ip->__seq = tswap16(target_ip->__seq);
3876#endif
3877    unlock_user_struct(target_sd, target_addr, 0);
3878    return 0;
3879}
3880
3881static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3882                                               struct ipc_perm *host_ip)
3883{
3884    struct target_ipc_perm *target_ip;
3885    struct target_semid64_ds *target_sd;
3886
3887    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3888        return -TARGET_EFAULT;
3889    target_ip = &(target_sd->sem_perm);
3890    target_ip->__key = tswap32(host_ip->__key);
3891    target_ip->uid = tswap32(host_ip->uid);
3892    target_ip->gid = tswap32(host_ip->gid);
3893    target_ip->cuid = tswap32(host_ip->cuid);
3894    target_ip->cgid = tswap32(host_ip->cgid);
3895#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3896    target_ip->mode = tswap32(host_ip->mode);
3897#else
3898    target_ip->mode = tswap16(host_ip->mode);
3899#endif
3900#if defined(TARGET_PPC)
3901    target_ip->__seq = tswap32(host_ip->__seq);
3902#else
3903    target_ip->__seq = tswap16(host_ip->__seq);
3904#endif
3905    unlock_user_struct(target_sd, target_addr, 1);
3906    return 0;
3907}
3908
3909static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3910                                               abi_ulong target_addr)
3911{
3912    struct target_semid64_ds *target_sd;
3913
3914    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3915        return -TARGET_EFAULT;
3916    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3917        return -TARGET_EFAULT;
3918    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3919    host_sd->sem_otime = tswapal(target_sd->sem_otime);
3920    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3921    unlock_user_struct(target_sd, target_addr, 0);
3922    return 0;
3923}
3924
3925static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3926                                               struct semid_ds *host_sd)
3927{
3928    struct target_semid64_ds *target_sd;
3929
3930    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3931        return -TARGET_EFAULT;
3932    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3933        return -TARGET_EFAULT;
3934    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3935    target_sd->sem_otime = tswapal(host_sd->sem_otime);
3936    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3937    unlock_user_struct(target_sd, target_addr, 1);
3938    return 0;
3939}
3940
3941struct target_seminfo {
3942    int semmap;
3943    int semmni;
3944    int semmns;
3945    int semmnu;
3946    int semmsl;
3947    int semopm;
3948    int semume;
3949    int semusz;
3950    int semvmx;
3951    int semaem;
3952};
3953
3954static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3955                                              struct seminfo *host_seminfo)
3956{
3957    struct target_seminfo *target_seminfo;
3958    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3959        return -TARGET_EFAULT;
3960    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3961    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3962    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3963    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3964    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3965    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3966    __put_user(host_seminfo->semume, &target_seminfo->semume);
3967    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3968    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3969    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3970    unlock_user_struct(target_seminfo, target_addr, 1);
3971    return 0;
3972}
3973
3974union semun {
3975        int val;
3976        struct semid_ds *buf;
3977        unsigned short *array;
3978        struct seminfo *__buf;
3979};
3980
3981union target_semun {
3982        int val;
3983        abi_ulong buf;
3984        abi_ulong array;
3985        abi_ulong __buf;
3986};
3987
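/* Copy the guest's semaphore value array (SETALL) into a newly allocated
 * host array, using IPC_STAT to learn how many semaphores the set holds.
 * The matching host_to_target_semarray() copies the values back out (GETALL)
 * and frees the host array.
 */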
3988static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3989                                               abi_ulong target_addr)
3990{
3991    int nsems;
3992    unsigned short *array;
3993    union semun semun;
3994    struct semid_ds semid_ds;
3995    int i, ret;
3996
3997    semun.buf = &semid_ds;
3998
3999    ret = semctl(semid, 0, IPC_STAT, semun);
4000    if (ret == -1)
4001        return get_errno(ret);
4002
4003    nsems = semid_ds.sem_nsems;
4004
4005    *host_array = g_try_new(unsigned short, nsems);
4006    if (!*host_array) {
4007        return -TARGET_ENOMEM;
4008    }
4009    array = lock_user(VERIFY_READ, target_addr,
4010                      nsems*sizeof(unsigned short), 1);
4011    if (!array) {
4012        g_free(*host_array);
4013        return -TARGET_EFAULT;
4014    }
4015
4016    for (i = 0; i < nsems; i++) {
4017        __get_user((*host_array)[i], &array[i]);
4018    }
4019    unlock_user(array, target_addr, 0);
4020
4021    return 0;
4022}
4023
4024static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4025                                               unsigned short **host_array)
4026{
4027    int nsems;
4028    unsigned short *array;
4029    union semun semun;
4030    struct semid_ds semid_ds;
4031    int i, ret;
4032
4033    semun.buf = &semid_ds;
4034
4035    ret = semctl(semid, 0, IPC_STAT, semun);
4036    if (ret == -1)
4037        return get_errno(ret);
4038
4039    nsems = semid_ds.sem_nsems;
4040
4041    array = lock_user(VERIFY_WRITE, target_addr,
4042                      nsems*sizeof(unsigned short), 0);
4043    if (!array)
4044        return -TARGET_EFAULT;
4045
4046    for (i = 0; i < nsems; i++) {
4047        __put_user((*host_array)[i], &array[i]);
4048    }
4049    g_free(*host_array);
4050    unlock_user(array, target_addr, 1);
4051
4052    return 0;
4053}
4054
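/* Implement semctl() on behalf of the guest: the command determines how the
 * semun argument is converted (plain value, semaphore value array, semid_ds
 * buffer or seminfo buffer) before and after the host call.
 */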
4055static inline abi_long do_semctl(int semid, int semnum, int cmd,
4056                                 abi_ulong target_arg)
4057{
4058    union target_semun target_su = { .buf = target_arg };
4059    union semun arg;
4060    struct semid_ds dsarg;
4061    unsigned short *array = NULL;
4062    struct seminfo seminfo;
4063    abi_long ret = -TARGET_EINVAL;
4064    abi_long err;
4065    cmd &= 0xff;
4066
4067    switch (cmd) {
4068        case GETVAL:
4069        case SETVAL:
4070            /* In 64-bit cross-endian situations, we will erroneously pick up
4071             * the wrong half of the union for the "val" element.  To rectify
4072             * this, the entire 8-byte structure is byteswapped, followed by
4073             * a swap of the 4-byte val field. In other cases, the data is
4074             * already in proper host byte order. */
4075            if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4076                target_su.buf = tswapal(target_su.buf);
4077                arg.val = tswap32(target_su.val);
4078            } else {
4079                arg.val = target_su.val;
4080            }
4081            ret = get_errno(semctl(semid, semnum, cmd, arg));
4082            break;
4083        case GETALL:
4084        case SETALL:
4085            err = target_to_host_semarray(semid, &array, target_su.array);
4086            if (err)
4087                return err;
4088            arg.array = array;
4089            ret = get_errno(semctl(semid, semnum, cmd, arg));
4090            err = host_to_target_semarray(semid, target_su.array, &array);
4091            if (err)
4092                return err;
4093            break;
4094        case IPC_STAT:
4095        case IPC_SET:
4096        case SEM_STAT:
4097            err = target_to_host_semid_ds(&dsarg, target_su.buf);
4098            if (err)
4099                return err;
4100            arg.buf = &dsarg;
4101            ret = get_errno(semctl(semid, semnum, cmd, arg));
4102            err = host_to_target_semid_ds(target_su.buf, &dsarg);
4103            if (err)
4104                return err;
4105            break;
4106        case IPC_INFO:
4107        case SEM_INFO:
4108            arg.__buf = &seminfo;
4109            ret = get_errno(semctl(semid, semnum, cmd, arg));
4110            err = host_to_target_seminfo(target_su.__buf, &seminfo);
4111            if (err)
4112                return err;
4113            break;
4114        case IPC_RMID:
4115        case GETPID:
4116        case GETNCNT:
4117        case GETZCNT:
4118            ret = get_errno(semctl(semid, semnum, cmd, NULL));
4119            break;
4120    }
4121
4122    return ret;
4123}
4124
4125struct target_sembuf {
4126    unsigned short sem_num;
4127    short sem_op;
4128    short sem_flg;
4129};
4130
4131static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4132                                             abi_ulong target_addr,
4133                                             unsigned nsops)
4134{
4135    struct target_sembuf *target_sembuf;
4136    int i;
4137
4138    target_sembuf = lock_user(VERIFY_READ, target_addr,
4139                              nsops*sizeof(struct target_sembuf), 1);
4140    if (!target_sembuf)
4141        return -TARGET_EFAULT;
4142
4143    for (i = 0; i < nsops; i++) {
4144        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4145        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4146        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4147    }
4148
4149    unlock_user(target_sembuf, target_addr, 0);
4150
4151    return 0;
4152}
4153
4154#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4155    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4156
4157/*
4158 * This macro is required to handle the s390 variant, which passes the
4159 * arguments in a different order than the default.
4160 */
4161#ifdef __s390x__
4162#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4163  (__nsops), (__timeout), (__sops)
4164#else
4165#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4166  (__nsops), 0, (__sops), (__timeout)
4167#endif
4168
4169static inline abi_long do_semtimedop(int semid,
4170                                     abi_long ptr,
4171                                     unsigned nsops,
4172                                     abi_long timeout, bool time64)
4173{
4174    struct sembuf *sops;
4175    struct timespec ts, *pts = NULL;
4176    abi_long ret;
4177
4178    if (timeout) {
4179        pts = &ts;
4180        if (time64) {
4181            if (target_to_host_timespec64(pts, timeout)) {
4182                return -TARGET_EFAULT;
4183            }
4184        } else {
4185            if (target_to_host_timespec(pts, timeout)) {
4186                return -TARGET_EFAULT;
4187            }
4188        }
4189    }
4190
4191    if (nsops > TARGET_SEMOPM) {
4192        return -TARGET_E2BIG;
4193    }
4194
4195    sops = g_new(struct sembuf, nsops);
4196
4197    if (target_to_host_sembuf(sops, ptr, nsops)) {
4198        g_free(sops);
4199        return -TARGET_EFAULT;
4200    }
4201
4202    ret = -TARGET_ENOSYS;
4203#ifdef __NR_semtimedop
4204    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4205#endif
4206#ifdef __NR_ipc
4207    if (ret == -TARGET_ENOSYS) {
4208        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4209                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4210    }
4211#endif
4212    g_free(sops);
4213    return ret;
4214}
4215#endif
4216
4217struct target_msqid_ds
4218{
4219    struct target_ipc_perm msg_perm;
4220    abi_ulong msg_stime;
4221#if TARGET_ABI_BITS == 32
4222    abi_ulong __unused1;
4223#endif
4224    abi_ulong msg_rtime;
4225#if TARGET_ABI_BITS == 32
4226    abi_ulong __unused2;
4227#endif
4228    abi_ulong msg_ctime;
4229#if TARGET_ABI_BITS == 32
4230    abi_ulong __unused3;
4231#endif
4232    abi_ulong __msg_cbytes;
4233    abi_ulong msg_qnum;
4234    abi_ulong msg_qbytes;
4235    abi_ulong msg_lspid;
4236    abi_ulong msg_lrpid;
4237    abi_ulong __unused4;
4238    abi_ulong __unused5;
4239};
4240
4241static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4242                                               abi_ulong target_addr)
4243{
4244    struct target_msqid_ds *target_md;
4245
4246    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4247        return -TARGET_EFAULT;
4248    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4249        return -TARGET_EFAULT;
4250    host_md->msg_stime = tswapal(target_md->msg_stime);
4251    host_md->msg_rtime = tswapal(target_md->msg_rtime);
4252    host_md->msg_ctime = tswapal(target_md->msg_ctime);
4253    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4254    host_md->msg_qnum = tswapal(target_md->msg_qnum);
4255    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4256    host_md->msg_lspid = tswapal(target_md->msg_lspid);
4257    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4258    unlock_user_struct(target_md, target_addr, 0);
4259    return 0;
4260}
4261
4262static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4263                                               struct msqid_ds *host_md)
4264{
4265    struct target_msqid_ds *target_md;
4266
4267    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4268        return -TARGET_EFAULT;
4269    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4270        return -TARGET_EFAULT;
4271    target_md->msg_stime = tswapal(host_md->msg_stime);
4272    target_md->msg_rtime = tswapal(host_md->msg_rtime);
4273    target_md->msg_ctime = tswapal(host_md->msg_ctime);
4274    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4275    target_md->msg_qnum = tswapal(host_md->msg_qnum);
4276    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4277    target_md->msg_lspid = tswapal(host_md->msg_lspid);
4278    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4279    unlock_user_struct(target_md, target_addr, 1);
4280    return 0;
4281}
4282
4283struct target_msginfo {
4284    int msgpool;
4285    int msgmap;
4286    int msgmax;
4287    int msgmnb;
4288    int msgmni;
4289    int msgssz;
4290    int msgtql;
4291    unsigned short int msgseg;
4292};
4293
4294static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4295                                              struct msginfo *host_msginfo)
4296{
4297    struct target_msginfo *target_msginfo;
4298    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4299        return -TARGET_EFAULT;
4300    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4301    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4302    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4303    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4304    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4305    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4306    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4307    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4308    unlock_user_struct(target_msginfo, target_addr, 1);
4309    return 0;
4310}
4311
4312static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4313{
4314    struct msqid_ds dsarg;
4315    struct msginfo msginfo;
4316    abi_long ret = -TARGET_EINVAL;
4317
4318    cmd &= 0xff;
4319
4320    switch (cmd) {
4321    case IPC_STAT:
4322    case IPC_SET:
4323    case MSG_STAT:
4324        if (target_to_host_msqid_ds(&dsarg,ptr))
4325            return -TARGET_EFAULT;
4326        ret = get_errno(msgctl(msgid, cmd, &dsarg));
4327        if (host_to_target_msqid_ds(ptr,&dsarg))
4328            return -TARGET_EFAULT;
4329        break;
4330    case IPC_RMID:
4331        ret = get_errno(msgctl(msgid, cmd, NULL));
4332        break;
4333    case IPC_INFO:
4334    case MSG_INFO:
4335        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4336        if (host_to_target_msginfo(ptr, &msginfo))
4337            return -TARGET_EFAULT;
4338        break;
4339    }
4340
4341    return ret;
4342}
4343
4344struct target_msgbuf {
4345    abi_long mtype;
4346    char        mtext[1];
4347};
4348
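/* Copy a guest msgbuf into a host buffer and submit it with msgsnd(),
 * falling back to the multiplexed ipc() syscall on hosts that only provide
 * the latter.
 */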
4349static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4350                                 ssize_t msgsz, int msgflg)
4351{
4352    struct target_msgbuf *target_mb;
4353    struct msgbuf *host_mb;
4354    abi_long ret = 0;
4355
4356    if (msgsz < 0) {
4357        return -TARGET_EINVAL;
4358    }
4359
4360    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4361        return -TARGET_EFAULT;
4362    host_mb = g_try_malloc(msgsz + sizeof(long));
4363    if (!host_mb) {
4364        unlock_user_struct(target_mb, msgp, 0);
4365        return -TARGET_ENOMEM;
4366    }
4367    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4368    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4369    ret = -TARGET_ENOSYS;
4370#ifdef __NR_msgsnd
4371    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4372#endif
4373#ifdef __NR_ipc
4374    if (ret == -TARGET_ENOSYS) {
4375#ifdef __s390x__
4376        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4377                                 host_mb));
4378#else
4379        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4380                                 host_mb, 0));
4381#endif
4382    }
4383#endif
4384    g_free(host_mb);
4385    unlock_user_struct(target_mb, msgp, 0);
4386
4387    return ret;
4388}
4389
4390#ifdef __NR_ipc
4391#if defined(__sparc__)
4392/* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4393#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4394#elif defined(__s390x__)
4395/* The s390 sys_ipc variant has only five parameters.  */
4396#define MSGRCV_ARGS(__msgp, __msgtyp) \
4397    ((long int[]){(long int)__msgp, __msgtyp})
4398#else
4399#define MSGRCV_ARGS(__msgp, __msgtyp) \
4400    ((long int[]){(long int)__msgp, __msgtyp}), 0
4401#endif
4402#endif
4403
4404static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4405                                 ssize_t msgsz, abi_long msgtyp,
4406                                 int msgflg)
4407{
4408    struct target_msgbuf *target_mb;
4409    char *target_mtext;
4410    struct msgbuf *host_mb;
4411    abi_long ret = 0;
4412
4413    if (msgsz < 0) {
4414        return -TARGET_EINVAL;
4415    }
4416
4417    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4418        return -TARGET_EFAULT;
4419
4420    host_mb = g_try_malloc(msgsz + sizeof(long));
4421    if (!host_mb) {
4422        ret = -TARGET_ENOMEM;
4423        goto end;
4424    }
4425    ret = -TARGET_ENOSYS;
4426#ifdef __NR_msgrcv
4427    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4428#endif
4429#ifdef __NR_ipc
4430    if (ret == -TARGET_ENOSYS) {
4431        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4432                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4433    }
4434#endif
4435
4436    if (ret > 0) {
4437        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4438        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4439        if (!target_mtext) {
4440            ret = -TARGET_EFAULT;
4441            goto end;
4442        }
4443        memcpy(target_mb->mtext, host_mb->mtext, ret);
4444        unlock_user(target_mtext, target_mtext_addr, ret);
4445    }
4446
4447    target_mb->mtype = tswapal(host_mb->mtype);
4448
4449end:
4450    if (target_mb)
4451        unlock_user_struct(target_mb, msgp, 1);
4452    g_free(host_mb);
4453    return ret;
4454}
4455
4456static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4457                                               abi_ulong target_addr)
4458{
4459    struct target_shmid_ds *target_sd;
4460
4461    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4462        return -TARGET_EFAULT;
4463    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4464        return -TARGET_EFAULT;
4465    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4466    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4467    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4468    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4469    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4470    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4471    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4472    unlock_user_struct(target_sd, target_addr, 0);
4473    return 0;
4474}
4475
4476static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4477                                               struct shmid_ds *host_sd)
4478{
4479    struct target_shmid_ds *target_sd;
4480
4481    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4482        return -TARGET_EFAULT;
4483    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4484        return -TARGET_EFAULT;
4485    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4486    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4487    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4488    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4489    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4490    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4491    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4492    unlock_user_struct(target_sd, target_addr, 1);
4493    return 0;
4494}
4495
4496struct  target_shminfo {
4497    abi_ulong shmmax;
4498    abi_ulong shmmin;
4499    abi_ulong shmmni;
4500    abi_ulong shmseg;
4501    abi_ulong shmall;
4502};
4503
4504static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4505                                              struct shminfo *host_shminfo)
4506{
4507    struct target_shminfo *target_shminfo;
4508    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4509        return -TARGET_EFAULT;
4510    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4511    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4512    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4513    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4514    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4515    unlock_user_struct(target_shminfo, target_addr, 1);
4516    return 0;
4517}
4518
4519struct target_shm_info {
4520    int used_ids;
4521    abi_ulong shm_tot;
4522    abi_ulong shm_rss;
4523    abi_ulong shm_swp;
4524    abi_ulong swap_attempts;
4525    abi_ulong swap_successes;
4526};
4527
4528static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4529                                               struct shm_info *host_shm_info)
4530{
4531    struct target_shm_info *target_shm_info;
4532    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4533        return -TARGET_EFAULT;
4534    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4535    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4536    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4537    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4538    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4539    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4540    unlock_user_struct(target_shm_info, target_addr, 1);
4541    return 0;
4542}
4543
4544static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4545{
4546    struct shmid_ds dsarg;
4547    struct shminfo shminfo;
4548    struct shm_info shm_info;
4549    abi_long ret = -TARGET_EINVAL;
4550
4551    cmd &= 0xff;
4552
4553    switch (cmd) {
4554    case IPC_STAT:
4555    case IPC_SET:
4556    case SHM_STAT:
4557        if (target_to_host_shmid_ds(&dsarg, buf))
4558            return -TARGET_EFAULT;
4559        ret = get_errno(shmctl(shmid, cmd, &dsarg));
4560        if (host_to_target_shmid_ds(buf, &dsarg))
4561            return -TARGET_EFAULT;
4562        break;
4563    case IPC_INFO:
4564        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4565        if (host_to_target_shminfo(buf, &shminfo))
4566            return -TARGET_EFAULT;
4567        break;
4568    case SHM_INFO:
4569        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4570        if (host_to_target_shm_info(buf, &shm_info))
4571            return -TARGET_EFAULT;
4572        break;
4573    case IPC_RMID:
4574    case SHM_LOCK:
4575    case SHM_UNLOCK:
4576        ret = get_errno(shmctl(shmid, cmd, NULL));
4577        break;
4578    }
4579
4580    return ret;
4581}
4582
4583#ifndef TARGET_FORCE_SHMLBA
4584/* For most architectures, SHMLBA is the same as the page size;
4585 * some architectures have larger values, in which case they should
4586 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4587 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4588 * and defining its own value for SHMLBA.
4589 *
4590 * The kernel also permits SHMLBA to be set by the architecture to a
4591 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4592 * this means that addresses are rounded to the large size if
4593 * SHM_RND is set but addresses not aligned to that size are not rejected
4594 * as long as they are at least page-aligned. Since the only architecture
4595 * which uses this is ia64, this code doesn't provide for that oddity.
4596 */
4597static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4598{
4599    return TARGET_PAGE_SIZE;
4600}
4601#endif
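/* As a minimal sketch (purely hypothetical, not taken from any existing
 * target): an architecture whose cache aliasing granule were four pages
 * would define TARGET_FORCE_SHMLBA in its target headers and supply its own
 * helper instead of the generic one above, along the lines of:
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 */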
4602
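/* Attach a SysV shared memory segment into the guest address space: the
 * requested guest address is checked against the target SHMLBA, a free range
 * is found with mmap_find_vma() when no address was given, guest page flags
 * are updated, and the mapping is recorded in shm_regions[] so that
 * do_shmdt() can undo it later.
 */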
4603static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4604                                 int shmid, abi_ulong shmaddr, int shmflg)
4605{
4606    abi_long raddr;
4607    void *host_raddr;
4608    struct shmid_ds shm_info;
4609    int i, ret;
4610    abi_ulong shmlba;
4611
4612    /* shmat pointers are always untagged */
4613
4614    /* find out the length of the shared memory segment */
4615    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4616    if (is_error(ret)) {
4617        /* can't get length, bail out */
4618        return ret;
4619    }
4620
4621    shmlba = target_shmlba(cpu_env);
4622
4623    if (shmaddr & (shmlba - 1)) {
4624        if (shmflg & SHM_RND) {
4625            shmaddr &= ~(shmlba - 1);
4626        } else {
4627            return -TARGET_EINVAL;
4628        }
4629    }
4630    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4631        return -TARGET_EINVAL;
4632    }
4633
4634    mmap_lock();
4635
4636    if (shmaddr)
4637        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4638    else {
4639        abi_ulong mmap_start;
4640
4641        /* In order to use the host shmat, we need to honor host SHMLBA.  */
4642        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4643
4644        if (mmap_start == -1) {
4645            errno = ENOMEM;
4646            host_raddr = (void *)-1;
4647        } else
4648            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4649                               shmflg | SHM_REMAP);
4650    }
4651
4652    if (host_raddr == (void *)-1) {
4653        mmap_unlock();
4654        return get_errno((long)host_raddr);
4655    }
4656    raddr = h2g((unsigned long)host_raddr);
4657
4658    page_set_flags(raddr, raddr + shm_info.shm_segsz,
4659                   PAGE_VALID | PAGE_RESET | PAGE_READ |
4660                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4661
4662    for (i = 0; i < N_SHM_REGIONS; i++) {
4663        if (!shm_regions[i].in_use) {
4664            shm_regions[i].in_use = true;
4665            shm_regions[i].start = raddr;
4666            shm_regions[i].size = shm_info.shm_segsz;
4667            break;
4668        }
4669    }
4670
4671    mmap_unlock();
4672    return raddr;
4673
4674}
4675
4676static inline abi_long do_shmdt(abi_ulong shmaddr)
4677{
4678    int i;
4679    abi_long rv;
4680
4681    /* shmdt pointers are always untagged */
4682
4683    mmap_lock();
4684
4685    for (i = 0; i < N_SHM_REGIONS; ++i) {
4686        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4687            shm_regions[i].in_use = false;
4688            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4689            break;
4690        }
4691    }
4692    rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4693
4694    mmap_unlock();
4695
4696    return rv;
4697}
4698
4699#ifdef TARGET_NR_ipc
4700/* ??? This only works with linear mappings.  */
4701/* do_ipc() must return target values and target errnos. */
4702static abi_long do_ipc(CPUArchState *cpu_env,
4703                       unsigned int call, abi_long first,
4704                       abi_long second, abi_long third,
4705                       abi_long ptr, abi_long fifth)
4706{
4707    int version;
4708    abi_long ret = 0;
4709
4710    version = call >> 16;
4711    call &= 0xffff;
4712
4713    switch (call) {
4714    case IPCOP_semop:
4715        ret = do_semtimedop(first, ptr, second, 0, false);
4716        break;
4717    case IPCOP_semtimedop:
4718    /*
4719     * The s390 sys_ipc variant has only five parameters instead of six
4720     * (as for default variant) and the only difference is the handling of
4721     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4722     * to a struct timespec where the generic variant uses fifth parameter.
4723     */
4724#if defined(TARGET_S390X)
4725        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4726#else
4727        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4728#endif
4729        break;
4730
4731    case IPCOP_semget:
4732        ret = get_errno(semget(first, second, third));
4733        break;
4734
4735    case IPCOP_semctl: {
4736        /* The semun argument to semctl is passed by value, so dereference the
4737         * ptr argument. */
4738        abi_ulong atptr;
4739        get_user_ual(atptr, ptr);
4740        ret = do_semctl(first, second, third, atptr);
4741        break;
4742    }
4743
4744    case IPCOP_msgget:
4745        ret = get_errno(msgget(first, second));
4746        break;
4747
4748    case IPCOP_msgsnd:
4749        ret = do_msgsnd(first, ptr, second, third);
4750        break;
4751
4752    case IPCOP_msgctl:
4753        ret = do_msgctl(first, second, ptr);
4754        break;
4755
4756    case IPCOP_msgrcv:
4757        switch (version) {
4758        case 0:
4759            {
4760                struct target_ipc_kludge {
4761                    abi_long msgp;
4762                    abi_long msgtyp;
4763                } *tmp;
4764
4765                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4766                    ret = -TARGET_EFAULT;
4767                    break;
4768                }
4769
4770                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4771
4772                unlock_user_struct(tmp, ptr, 0);
4773                break;
4774            }
4775        default:
4776            ret = do_msgrcv(first, ptr, second, fifth, third);
4777        }
4778        break;
4779
4780    case IPCOP_shmat:
4781        switch (version) {
4782        default:
4783        {
4784            abi_ulong raddr;
4785            raddr = do_shmat(cpu_env, first, ptr, second);
4786            if (is_error(raddr))
4787                return get_errno(raddr);
4788            if (put_user_ual(raddr, third))
4789                return -TARGET_EFAULT;
4790            break;
4791        }
4792        case 1:
4793            ret = -TARGET_EINVAL;
4794            break;
4795        }
4796        break;
4797    case IPCOP_shmdt:
4798        ret = do_shmdt(ptr);
4799        break;
4800
4801    case IPCOP_shmget:
4802        /* IPC_* flag values are the same on all linux platforms */
4803        ret = get_errno(shmget(first, second, third));
4804        break;
4805
4806        /* IPC_* and SHM_* command values are the same on all linux platforms */
4807    case IPCOP_shmctl:
4808        ret = do_shmctl(first, second, ptr);
4809        break;
4810    default:
4811        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4812                      call, version);
4813        ret = -TARGET_ENOSYS;
4814        break;
4815    }
4816    return ret;
4817}
4818#endif
4819
4820/* kernel structure types definitions */
4821
4822#define STRUCT(name, ...) STRUCT_ ## name,
4823#define STRUCT_SPECIAL(name) STRUCT_ ## name,
4824enum {
4825#include "syscall_types.h"
4826STRUCT_MAX
4827};
4828#undef STRUCT
4829#undef STRUCT_SPECIAL
4830
4831#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4832#define STRUCT_SPECIAL(name)
4833#include "syscall_types.h"
4834#undef STRUCT
4835#undef STRUCT_SPECIAL
4836
4837#define MAX_STRUCT_SIZE 4096
4838
4839#ifdef CONFIG_FIEMAP
4840/* So fiemap access checks don't overflow on 32-bit systems.
4841 * This is very slightly smaller than the limit imposed by
4842 * the underlying kernel.
4843 */
4844#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4845                            / sizeof(struct fiemap_extent))
4846
4847static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4848                                       int fd, int cmd, abi_long arg)
4849{
4850    /* The parameter for this ioctl is a struct fiemap followed
4851     * by an array of struct fiemap_extent whose size is set
4852     * in fiemap->fm_extent_count. The array is filled in by the
4853     * ioctl.
4854     */
4855    int target_size_in, target_size_out;
4856    struct fiemap *fm;
4857    const argtype *arg_type = ie->arg_type;
4858    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4859    void *argptr, *p;
4860    abi_long ret;
4861    int i, extent_size = thunk_type_size(extent_arg_type, 0);
4862    uint32_t outbufsz;
4863    int free_fm = 0;
4864
4865    assert(arg_type[0] == TYPE_PTR);
4866    assert(ie->access == IOC_RW);
4867    arg_type++;
4868    target_size_in = thunk_type_size(arg_type, 0);
4869    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4870    if (!argptr) {
4871        return -TARGET_EFAULT;
4872    }
4873    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4874    unlock_user(argptr, arg, 0);
4875    fm = (struct fiemap *)buf_temp;
4876    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4877        return -TARGET_EINVAL;
4878    }
4879
4880    outbufsz = sizeof (*fm) +
4881        (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4882
4883    if (outbufsz > MAX_STRUCT_SIZE) {
4884        /* We can't fit all the extents into the fixed size buffer.
4885         * Allocate one that is large enough and use it instead.
4886         */
4887        fm = g_try_malloc(outbufsz);
4888        if (!fm) {
4889            return -TARGET_ENOMEM;
4890        }
4891        memcpy(fm, buf_temp, sizeof(struct fiemap));
4892        free_fm = 1;
4893    }
4894    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4895    if (!is_error(ret)) {
4896        target_size_out = target_size_in;
4897        /* An extent_count of 0 means we were only counting the extents
4898         * so there are no structs to copy
4899         */
4900        if (fm->fm_extent_count != 0) {
4901            target_size_out += fm->fm_mapped_extents * extent_size;
4902        }
4903        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4904        if (!argptr) {
4905            ret = -TARGET_EFAULT;
4906        } else {
4907            /* Convert the struct fiemap */
4908            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4909            if (fm->fm_extent_count != 0) {
4910                p = argptr + target_size_in;
4911                /* ...and then all the struct fiemap_extents */
4912                for (i = 0; i < fm->fm_mapped_extents; i++) {
4913                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4914                                  THUNK_TARGET);
4915                    p += extent_size;
4916                }
4917            }
4918            unlock_user(argptr, arg, target_size_out);
4919        }
4920    }
4921    if (free_fm) {
4922        g_free(fm);
4923    }
4924    return ret;
4925}
4926#endif
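
/*
 * Guest-side usage sketch (editor's addition): the layout that
 * do_ioctl_fs_ioc_fiemap() marshals is a struct fiemap header immediately
 * followed by fm_extent_count extents, e.g.:
 *
 *     size_t sz = sizeof(struct fiemap) + 8 * sizeof(struct fiemap_extent);
 *     struct fiemap *fm = calloc(1, sz);
 *     fm->fm_start = 0;
 *     fm->fm_length = FIEMAP_MAX_OFFSET;
 *     fm->fm_extent_count = 8;     // room for 8 extents after the header
 *     if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0) {
 *         // fm->fm_mapped_extents entries of fm->fm_extents[] are now valid
 *     }
 *
 * With fm_extent_count == 0 the kernel only reports the number of extents,
 * which is why the handler copies back just the converted header in that
 * case.
 */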
4927
4928static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4929                                int fd, int cmd, abi_long arg)
4930{
4931    const argtype *arg_type = ie->arg_type;
4932    int target_size;
4933    void *argptr;
4934    int ret;
4935    struct ifconf *host_ifconf;
4936    uint32_t outbufsz;
4937    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4938    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4939    int target_ifreq_size;
4940    int nb_ifreq;
4941    int free_buf = 0;
4942    int i;
4943    int target_ifc_len;
4944    abi_long target_ifc_buf;
4945    int host_ifc_len;
4946    char *host_ifc_buf;
4947
4948    assert(arg_type[0] == TYPE_PTR);
4949    assert(ie->access == IOC_RW);
4950
4951    arg_type++;
4952    target_size = thunk_type_size(arg_type, 0);
4953
4954    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4955    if (!argptr)
4956        return -TARGET_EFAULT;
4957    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4958    unlock_user(argptr, arg, 0);
4959
4960    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4961    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4962    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4963
4964    if (target_ifc_buf != 0) {
4965        target_ifc_len = host_ifconf->ifc_len;
4966        nb_ifreq = target_ifc_len / target_ifreq_size;
4967        host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4968
4969        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4970        if (outbufsz > MAX_STRUCT_SIZE) {
4971            /*
4972             * We can't fit all the ifreq entries into the fixed size buffer.
4973             * Allocate one that is large enough and use it instead.
4974             */
4975            host_ifconf = malloc(outbufsz);
4976            if (!host_ifconf) {
4977                return -TARGET_ENOMEM;
4978            }
4979            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4980            free_buf = 1;
4981        }
4982        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4983
4984        host_ifconf->ifc_len = host_ifc_len;
4985    } else {
4986        host_ifc_buf = NULL;
4987    }
4988    host_ifconf->ifc_buf = host_ifc_buf;
4989
4990    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4991    if (!is_error(ret)) {
4992        /* convert host ifc_len to target ifc_len */
4993
4994        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4995        target_ifc_len = nb_ifreq * target_ifreq_size;
4996        host_ifconf->ifc_len = target_ifc_len;
4997
4998        /* restore target ifc_buf */
4999
5000        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5001
5002        /* copy struct ifconf to target user */
5003
5004        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5005        if (!argptr)
5006            return -TARGET_EFAULT;
5007        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5008        unlock_user(argptr, arg, target_size);
5009
5010        if (target_ifc_buf != 0) {
5011            /* copy ifreq[] to target user */
5012            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5013            for (i = 0; i < nb_ifreq ; i++) {
5014                thunk_convert(argptr + i * target_ifreq_size,
5015                              host_ifc_buf + i * sizeof(struct ifreq),
5016                              ifreq_arg_type, THUNK_TARGET);
5017            }
5018            unlock_user(argptr, target_ifc_buf, target_ifc_len);
5019        }
5020    }
5021
5022    if (free_buf) {
5023        free(host_ifconf);
5024    }
5025
5026    return ret;
5027}
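
/*
 * Guest-side usage sketch (editor's addition; sock_fd stands for any
 * AF_INET socket): SIOCGIFCONF callers hand the kernel a buffer of
 * struct ifreq and read back how much of it was filled in:
 *
 *     struct ifreq reqs[16];
 *     struct ifconf ifc = { .ifc_len = sizeof(reqs), .ifc_req = reqs };
 *     if (ioctl(sock_fd, SIOCGIFCONF, &ifc) == 0) {
 *         int n = ifc.ifc_len / sizeof(struct ifreq);
 *         // reqs[0..n-1] now hold interface names and addresses
 *     }
 *
 * Because sizeof(struct ifreq) can differ between target and host ABIs,
 * do_ioctl_ifconf() above rescales ifc_len in both directions instead of
 * passing it through unchanged.
 */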
5028
5029#if defined(CONFIG_USBFS)
5030#if HOST_LONG_BITS > 64
5031#error USBDEVFS thunks do not support >64 bit hosts yet.
5032#endif
5033struct live_urb {
5034    uint64_t target_urb_adr;
5035    uint64_t target_buf_adr;
5036    char *target_buf_ptr;
5037    struct usbdevfs_urb host_urb;
5038};
5039
5040static GHashTable *usbdevfs_urb_hashtable(void)
5041{
5042    static GHashTable *urb_hashtable;
5043
5044    if (!urb_hashtable) {
5045        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5046    }
5047    return urb_hashtable;
5048}
5049
5050static void urb_hashtable_insert(struct live_urb *urb)
5051{
5052    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5053    g_hash_table_insert(urb_hashtable, urb, urb);
5054}
5055
5056static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5057{
5058    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5059    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5060}
5061
5062static void urb_hashtable_remove(struct live_urb *urb)
5063{
5064    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5065    g_hash_table_remove(urb_hashtable, urb);
5066}
5067
5068static abi_long
5069do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5070                          int fd, int cmd, abi_long arg)
5071{
5072    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5073    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5074    struct live_urb *lurb;
5075    void *argptr;
5076    uint64_t hurb;
5077    int target_size;
5078    uintptr_t target_urb_adr;
5079    abi_long ret;
5080
5081    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5082
5083    memset(buf_temp, 0, sizeof(uint64_t));
5084    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5085    if (is_error(ret)) {
5086        return ret;
5087    }
5088
5089    memcpy(&hurb, buf_temp, sizeof(uint64_t));
5090    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5091    if (!lurb->target_urb_adr) {
5092        return -TARGET_EFAULT;
5093    }
5094    urb_hashtable_remove(lurb);
5095    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5096        lurb->host_urb.buffer_length);
5097    lurb->target_buf_ptr = NULL;
5098
5099    /* restore the guest buffer pointer */
5100    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5101
5102    /* update the guest urb struct */
5103    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5104    if (!argptr) {
5105        g_free(lurb);
5106        return -TARGET_EFAULT;
5107    }
5108    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5109    unlock_user(argptr, lurb->target_urb_adr, target_size);
5110
5111    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5112    /* write back the urb handle */
5113    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5114    if (!argptr) {
5115        g_free(lurb);
5116        return -TARGET_EFAULT;
5117    }
5118
5119    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5120    target_urb_adr = lurb->target_urb_adr;
5121    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5122    unlock_user(argptr, arg, target_size);
5123
5124    g_free(lurb);
5125    return ret;
5126}
5127
5128static abi_long
5129do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5130                             uint8_t *buf_temp __attribute__((unused)),
5131                             int fd, int cmd, abi_long arg)
5132{
5133    struct live_urb *lurb;
5134
5135    /* map target address back to host URB with metadata. */
5136    lurb = urb_hashtable_lookup(arg);
5137    if (!lurb) {
5138        return -TARGET_EFAULT;
5139    }
5140    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5141}
5142
5143static abi_long
5144do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5145                            int fd, int cmd, abi_long arg)
5146{
5147    const argtype *arg_type = ie->arg_type;
5148    int target_size;
5149    abi_long ret;
5150    void *argptr;
5151    int rw_dir;
5152    struct live_urb *lurb;
5153
5154    /*
5155     * Each submitted URB needs to map to a unique ID for the
5156     * kernel, and that unique ID needs to be a pointer to
5157     * host memory.  Hence, we need to malloc for each URB.
5158     * Isochronous transfers have a variable-length struct.
5159     */
5160    arg_type++;
5161    target_size = thunk_type_size(arg_type, THUNK_TARGET);
5162
5163    /* construct host copy of urb and metadata */
5164    lurb = g_try_malloc0(sizeof(struct live_urb));
5165    if (!lurb) {
5166        return -TARGET_ENOMEM;
5167    }
5168
5169    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5170    if (!argptr) {
5171        g_free(lurb);
5172        return -TARGET_EFAULT;
5173    }
5174    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5175    unlock_user(argptr, arg, 0);
5176
5177    lurb->target_urb_adr = arg;
5178    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5179
5180    /* buffer space used depends on endpoint type so lock the entire buffer */
5181    /* control type urbs should check the buffer contents for true direction */
5182    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5183    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5184        lurb->host_urb.buffer_length, 1);
5185    if (lurb->target_buf_ptr == NULL) {
5186        g_free(lurb);
5187        return -TARGET_EFAULT;
5188    }
5189
5190    /* update buffer pointer in host copy */
5191    lurb->host_urb.buffer = lurb->target_buf_ptr;
5192
5193    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5194    if (is_error(ret)) {
5195        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5196        g_free(lurb);
5197    } else {
5198        urb_hashtable_insert(lurb);
5199    }
5200
5201    return ret;
5202}
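
/*
 * Round-trip sketch (editor's addition): the kernel identifies a URB by the
 * pointer it was submitted with, so SUBMITURB hands it &lurb->host_urb and
 * REAPURB recovers the wrapping live_urb from the pointer it gets back with
 * the usual container_of arithmetic:
 *
 *     struct live_urb *lurb =
 *         (void *)((uintptr_t)reaped - offsetof(struct live_urb, host_urb));
 *
 * ("reaped" being the host pointer returned by the ioctl.)  The hashtable
 * keyed on target_urb_adr exists only so DISCARDURB, which is given the
 * guest address rather than the host pointer, can find the same live_urb.
 */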
5203#endif /* CONFIG_USBFS */
5204
5205static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5206                            int cmd, abi_long arg)
5207{
5208    void *argptr;
5209    struct dm_ioctl *host_dm;
5210    abi_long guest_data;
5211    uint32_t guest_data_size;
5212    int target_size;
5213    const argtype *arg_type = ie->arg_type;
5214    abi_long ret;
5215    void *big_buf = NULL;
5216    char *host_data;
5217
5218    arg_type++;
5219    target_size = thunk_type_size(arg_type, 0);
5220    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5221    if (!argptr) {
5222        ret = -TARGET_EFAULT;
5223        goto out;
5224    }
5225    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5226    unlock_user(argptr, arg, 0);
5227
5228    /* buf_temp is too small, so fetch things into a bigger buffer */
5229    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5230    memcpy(big_buf, buf_temp, target_size);
5231    buf_temp = big_buf;
5232    host_dm = big_buf;
5233
5234    guest_data = arg + host_dm->data_start;
5235    if ((guest_data - arg) < 0) {
5236        ret = -TARGET_EINVAL;
5237        goto out;
5238    }
5239    guest_data_size = host_dm->data_size - host_dm->data_start;
5240    host_data = (char*)host_dm + host_dm->data_start;
5241
5242    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5243    if (!argptr) {
5244        ret = -TARGET_EFAULT;
5245        goto out;
5246    }
5247
5248    switch (ie->host_cmd) {
5249    case DM_REMOVE_ALL:
5250    case DM_LIST_DEVICES:
5251    case DM_DEV_CREATE:
5252    case DM_DEV_REMOVE:
5253    case DM_DEV_SUSPEND:
5254    case DM_DEV_STATUS:
5255    case DM_DEV_WAIT:
5256    case DM_TABLE_STATUS:
5257    case DM_TABLE_CLEAR:
5258    case DM_TABLE_DEPS:
5259    case DM_LIST_VERSIONS:
5260        /* no input data */
5261        break;
5262    case DM_DEV_RENAME:
5263    case DM_DEV_SET_GEOMETRY:
5264        /* data contains only strings */
5265        memcpy(host_data, argptr, guest_data_size);
5266        break;
5267    case DM_TARGET_MSG:
5268        memcpy(host_data, argptr, guest_data_size);
5269        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5270        break;
5271    case DM_TABLE_LOAD:
5272    {
5273        void *gspec = argptr;
5274        void *cur_data = host_data;
5275        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5276        int spec_size = thunk_type_size(arg_type, 0);
5277        int i;
5278
5279        for (i = 0; i < host_dm->target_count; i++) {
5280            struct dm_target_spec *spec = cur_data;
5281            uint32_t next;
5282            int slen;
5283
5284            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5285            slen = strlen((char*)gspec + spec_size) + 1;
5286            next = spec->next;
5287            spec->next = sizeof(*spec) + slen;
5288            strcpy((char*)&spec[1], gspec + spec_size);
5289            gspec += next;
5290            cur_data += spec->next;
5291        }
5292        break;
5293    }
5294    default:
5295        ret = -TARGET_EINVAL;
5296        unlock_user(argptr, guest_data, 0);
5297        goto out;
5298    }
5299    unlock_user(argptr, guest_data, 0);
5300
5301    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5302    if (!is_error(ret)) {
5303        guest_data = arg + host_dm->data_start;
5304        guest_data_size = host_dm->data_size - host_dm->data_start;
5305        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5306        switch (ie->host_cmd) {
5307        case DM_REMOVE_ALL:
5308        case DM_DEV_CREATE:
5309        case DM_DEV_REMOVE:
5310        case DM_DEV_RENAME:
5311        case DM_DEV_SUSPEND:
5312        case DM_DEV_STATUS:
5313        case DM_TABLE_LOAD:
5314        case DM_TABLE_CLEAR:
5315        case DM_TARGET_MSG:
5316        case DM_DEV_SET_GEOMETRY:
5317            /* no return data */
5318            break;
5319        case DM_LIST_DEVICES:
5320        {
5321            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5322            uint32_t remaining_data = guest_data_size;
5323            void *cur_data = argptr;
5324            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5325            int nl_size = 12; /* can't use thunk_size due to alignment */
5326
5327            while (1) {
5328                uint32_t next = nl->next;
5329                if (next) {
5330                    nl->next = nl_size + (strlen(nl->name) + 1);
5331                }
5332                if (remaining_data < nl->next) {
5333                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
5334                    break;
5335                }
5336                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5337                strcpy(cur_data + nl_size, nl->name);
5338                cur_data += nl->next;
5339                remaining_data -= nl->next;
5340                if (!next) {
5341                    break;
5342                }
5343                nl = (void*)nl + next;
5344            }
5345            break;
5346        }
5347        case DM_DEV_WAIT:
5348        case DM_TABLE_STATUS:
5349        {
5350            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5351            void *cur_data = argptr;
5352            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5353            int spec_size = thunk_type_size(arg_type, 0);
5354            int i;
5355
5356            for (i = 0; i < host_dm->target_count; i++) {
5357                uint32_t next = spec->next;
5358                int slen = strlen((char*)&spec[1]) + 1;
5359                spec->next = (cur_data - argptr) + spec_size + slen;
5360                if (guest_data_size < spec->next) {
5361                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
5362                    break;
5363                }
5364                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5365                strcpy(cur_data + spec_size, (char*)&spec[1]);
5366                cur_data = argptr + spec->next;
5367                spec = (void*)host_dm + host_dm->data_start + next;
5368            }
5369            break;
5370        }
5371        case DM_TABLE_DEPS:
5372        {
5373            void *hdata = (void*)host_dm + host_dm->data_start;
5374            int count = *(uint32_t*)hdata;
5375            uint64_t *hdev = hdata + 8;
5376            uint64_t *gdev = argptr + 8;
5377            int i;
5378
5379            *(uint32_t*)argptr = tswap32(count);
5380            for (i = 0; i < count; i++) {
5381                *gdev = tswap64(*hdev);
5382                gdev++;
5383                hdev++;
5384            }
5385            break;
5386        }
5387        case DM_LIST_VERSIONS:
5388        {
5389            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5390            uint32_t remaining_data = guest_data_size;
5391            void *cur_data = argptr;
5392            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5393            int vers_size = thunk_type_size(arg_type, 0);
5394
5395            while (1) {
5396                uint32_t next = vers->next;
5397                if (next) {
5398                    vers->next = vers_size + (strlen(vers->name) + 1);
5399                }
5400                if (remaining_data < vers->next) {
5401                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
5402                    break;
5403                }
5404                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5405                strcpy(cur_data + vers_size, vers->name);
5406                cur_data += vers->next;
5407                remaining_data -= vers->next;
5408                if (!next) {
5409                    break;
5410                }
5411                vers = (void*)vers + next;
5412            }
5413            break;
5414        }
5415        default:
5416            unlock_user(argptr, guest_data, 0);
5417            ret = -TARGET_EINVAL;
5418            goto out;
5419        }
5420        unlock_user(argptr, guest_data, guest_data_size);
5421
5422        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5423        if (!argptr) {
5424            ret = -TARGET_EFAULT;
5425            goto out;
5426        }
5427        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5428        unlock_user(argptr, arg, target_size);
5429    }
5430out:
5431    g_free(big_buf);
5432    return ret;
5433}
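
/*
 * Layout sketch (editor's addition; control_fd stands for an open
 * /dev/mapper/control): every DM_* ioctl passes one struct dm_ioctl whose
 * variable-sized payload lives in the same allocation, located by the
 * data_start/data_size fields of the header itself, e.g.:
 *
 *     struct dm_ioctl *dmi = calloc(1, 16384);
 *     dmi->version[0] = DM_VERSION_MAJOR;
 *     dmi->version[1] = DM_VERSION_MINOR;
 *     dmi->version[2] = DM_VERSION_PATCHLEVEL;
 *     dmi->data_size  = 16384;
 *     dmi->data_start = sizeof(*dmi);
 *     snprintf(dmi->name, sizeof(dmi->name), "example");
 *     ioctl(control_fd, DM_DEV_CREATE, dmi);
 *
 * That is why do_ioctl_dm() above converts the header with the thunk tables
 * but copies the payload at data_start separately, in both directions.
 */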
5434
5435static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5436                               int cmd, abi_long arg)
5437{
5438    void *argptr;
5439    int target_size;
5440    const argtype *arg_type = ie->arg_type;
5441    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5442    abi_long ret;
5443
5444    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5445    struct blkpg_partition host_part;
5446
5447    /* Read and convert blkpg */
5448    arg_type++;
5449    target_size = thunk_type_size(arg_type, 0);
5450    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5451    if (!argptr) {
5452        ret = -TARGET_EFAULT;
5453        goto out;
5454    }
5455    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5456    unlock_user(argptr, arg, 0);
5457
5458    switch (host_blkpg->op) {
5459    case BLKPG_ADD_PARTITION:
5460    case BLKPG_DEL_PARTITION:
5461        /* payload is struct blkpg_partition */
5462        break;
5463    default:
5464        /* Unknown opcode */
5465        ret = -TARGET_EINVAL;
5466        goto out;
5467    }
5468
5469    /* Read and convert blkpg->data */
5470    arg = (abi_long)(uintptr_t)host_blkpg->data;
5471    target_size = thunk_type_size(part_arg_type, 0);
5472    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5473    if (!argptr) {
5474        ret = -TARGET_EFAULT;
5475        goto out;
5476    }
5477    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5478    unlock_user(argptr, arg, 0);
5479
5480    /* Swizzle the data pointer to our local copy and call! */
5481    host_blkpg->data = &host_part;
5482    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5483
5484out:
5485    return ret;
5486}
5487
5488static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5489                                int fd, int cmd, abi_long arg)
5490{
5491    const argtype *arg_type = ie->arg_type;
5492    const StructEntry *se;
5493    const argtype *field_types;
5494    const int *dst_offsets, *src_offsets;
5495    int target_size;
5496    void *argptr;
5497    abi_ulong *target_rt_dev_ptr = NULL;
5498    unsigned long *host_rt_dev_ptr = NULL;
5499    abi_long ret;
5500    int i;
5501
5502    assert(ie->access == IOC_W);
5503    assert(*arg_type == TYPE_PTR);
5504    arg_type++;
5505    assert(*arg_type == TYPE_STRUCT);
5506    target_size = thunk_type_size(arg_type, 0);
5507    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5508    if (!argptr) {
5509        return -TARGET_EFAULT;
5510    }
5511    arg_type++;
5512    assert(*arg_type == (int)STRUCT_rtentry);
5513    se = struct_entries + *arg_type++;
5514    assert(se->convert[0] == NULL);
5515    /* convert struct here to be able to catch rt_dev string */
5516    field_types = se->field_types;
5517    dst_offsets = se->field_offsets[THUNK_HOST];
5518    src_offsets = se->field_offsets[THUNK_TARGET];
5519    for (i = 0; i < se->nb_fields; i++) {
5520        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5521            assert(*field_types == TYPE_PTRVOID);
5522            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5523            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5524            if (*target_rt_dev_ptr != 0) {
5525                *host_rt_dev_ptr = (unsigned long)lock_user_string(
5526                                                  tswapal(*target_rt_dev_ptr));
5527                if (!*host_rt_dev_ptr) {
5528                    unlock_user(argptr, arg, 0);
5529                    return -TARGET_EFAULT;
5530                }
5531            } else {
5532                *host_rt_dev_ptr = 0;
5533            }
5534            field_types++;
5535            continue;
5536        }
5537        field_types = thunk_convert(buf_temp + dst_offsets[i],
5538                                    argptr + src_offsets[i],
5539                                    field_types, THUNK_HOST);
5540    }
5541    unlock_user(argptr, arg, 0);
5542
5543    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5544
5545    assert(host_rt_dev_ptr != NULL);
5546    assert(target_rt_dev_ptr != NULL);
5547    if (*host_rt_dev_ptr != 0) {
5548        unlock_user((void *)*host_rt_dev_ptr,
5549                    *target_rt_dev_ptr, 0);
5550    }
5551    return ret;
5552}
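
/*
 * Guest-side sketch (editor's addition, abbreviated; sock_fd stands for any
 * AF_INET socket): routing ioctls such as SIOCADDRT take a struct rtentry
 * whose rt_dev member is itself a pointer to a device-name string, which is
 * why the generic thunk conversion is not enough here:
 *
 *     struct rtentry rt = { 0 };
 *     struct sockaddr_in *dst = (struct sockaddr_in *)&rt.rt_dst;
 *     dst->sin_family = AF_INET;
 *     dst->sin_addr.s_addr = inet_addr("10.0.0.0");
 *     rt.rt_flags = RTF_UP;
 *     rt.rt_dev = (char *)"eth0";          // nested guest pointer
 *     ioctl(sock_fd, SIOCADDRT, &rt);
 *
 * do_ioctl_rt() converts the flat fields with the thunk tables but pulls
 * rt_dev through lock_user_string() separately.
 */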
5553
5554static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5555                                     int fd, int cmd, abi_long arg)
5556{
5557    int sig = target_to_host_signal(arg);
5558    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5559}
5560
5561static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5562                                    int fd, int cmd, abi_long arg)
5563{
5564    struct timeval tv;
5565    abi_long ret;
5566
5567    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5568    if (is_error(ret)) {
5569        return ret;
5570    }
5571
5572    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5573        if (copy_to_user_timeval(arg, &tv)) {
5574            return -TARGET_EFAULT;
5575        }
5576    } else {
5577        if (copy_to_user_timeval64(arg, &tv)) {
5578            return -TARGET_EFAULT;
5579        }
5580    }
5581
5582    return ret;
5583}
5584
5585static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5586                                      int fd, int cmd, abi_long arg)
5587{
5588    struct timespec ts;
5589    abi_long ret;
5590
5591    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5592    if (is_error(ret)) {
5593        return ret;
5594    }
5595
5596    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5597        if (host_to_target_timespec(arg, &ts)) {
5598            return -TARGET_EFAULT;
5599        }
5600    } else {
5601        if (host_to_target_timespec64(arg, &ts)) {
5602            return -TARGET_EFAULT;
5603        }
5604    }
5605
5606    return ret;
5607}
5608
5609#ifdef TIOCGPTPEER
5610static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5611                                     int fd, int cmd, abi_long arg)
5612{
5613    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5614    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5615}
5616#endif
5617
5618#ifdef HAVE_DRM_H
5619
5620static void unlock_drm_version(struct drm_version *host_ver,
5621                               struct target_drm_version *target_ver,
5622                               bool copy)
5623{
5624    unlock_user(host_ver->name, target_ver->name,
5625                                copy ? host_ver->name_len : 0);
5626    unlock_user(host_ver->date, target_ver->date,
5627                                copy ? host_ver->date_len : 0);
5628    unlock_user(host_ver->desc, target_ver->desc,
5629                                copy ? host_ver->desc_len : 0);
5630}
5631
5632static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5633                                          struct target_drm_version *target_ver)
5634{
5635    memset(host_ver, 0, sizeof(*host_ver));
5636
5637    __get_user(host_ver->name_len, &target_ver->name_len);
5638    if (host_ver->name_len) {
5639        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5640                                   target_ver->name_len, 0);
5641        if (!host_ver->name) {
5642            return -EFAULT;
5643        }
5644    }
5645
5646    __get_user(host_ver->date_len, &target_ver->date_len);
5647    if (host_ver->date_len) {
5648        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5649                                   target_ver->date_len, 0);
5650        if (!host_ver->date) {
5651            goto err;
5652        }
5653    }
5654
5655    __get_user(host_ver->desc_len, &target_ver->desc_len);
5656    if (host_ver->desc_len) {
5657        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5658                                   target_ver->desc_len, 0);
5659        if (!host_ver->desc) {
5660            goto err;
5661        }
5662    }
5663
5664    return 0;
5665err:
5666    unlock_drm_version(host_ver, target_ver, false);
5667    return -EFAULT;
5668}
5669
5670static inline void host_to_target_drmversion(
5671                                          struct target_drm_version *target_ver,
5672                                          struct drm_version *host_ver)
5673{
5674    __put_user(host_ver->version_major, &target_ver->version_major);
5675    __put_user(host_ver->version_minor, &target_ver->version_minor);
5676    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5677    __put_user(host_ver->name_len, &target_ver->name_len);
5678    __put_user(host_ver->date_len, &target_ver->date_len);
5679    __put_user(host_ver->desc_len, &target_ver->desc_len);
5680    unlock_drm_version(host_ver, target_ver, true);
5681}
5682
5683static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5684                             int fd, int cmd, abi_long arg)
5685{
5686    struct drm_version *ver;
5687    struct target_drm_version *target_ver;
5688    abi_long ret;
5689
5690    switch (ie->host_cmd) {
5691    case DRM_IOCTL_VERSION:
5692        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5693            return -TARGET_EFAULT;
5694        }
5695        ver = (struct drm_version *)buf_temp;
5696        ret = target_to_host_drmversion(ver, target_ver);
5697        if (!is_error(ret)) {
5698            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5699            if (is_error(ret)) {
5700                unlock_drm_version(ver, target_ver, false);
5701            } else {
5702                host_to_target_drmversion(target_ver, ver);
5703            }
5704        }
5705        unlock_user_struct(target_ver, arg, 0);
5706        return ret;
5707    }
5708    return -TARGET_ENOSYS;
5709}
5710
5711static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5712                                           struct drm_i915_getparam *gparam,
5713                                           int fd, abi_long arg)
5714{
5715    abi_long ret;
5716    int value;
5717    struct target_drm_i915_getparam *target_gparam;
5718
5719    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5720        return -TARGET_EFAULT;
5721    }
5722
5723    __get_user(gparam->param, &target_gparam->param);
5724    gparam->value = &value;
5725    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5726    put_user_s32(value, target_gparam->value);
5727
5728    unlock_user_struct(target_gparam, arg, 0);
5729    return ret;
5730}
5731
5732static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5733                                  int fd, int cmd, abi_long arg)
5734{
5735    switch (ie->host_cmd) {
5736    case DRM_IOCTL_I915_GETPARAM:
5737        return do_ioctl_drm_i915_getparam(ie,
5738                                          (struct drm_i915_getparam *)buf_temp,
5739                                          fd, arg);
5740    default:
5741        return -TARGET_ENOSYS;
5742    }
5743}
5744
5745#endif
5746
5747static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5748                                        int fd, int cmd, abi_long arg)
5749{
5750    struct tun_filter *filter = (struct tun_filter *)buf_temp;
5751    struct tun_filter *target_filter;
5752    char *target_addr;
5753
5754    assert(ie->access == IOC_W);
5755
5756    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5757    if (!target_filter) {
5758        return -TARGET_EFAULT;
5759    }
5760    filter->flags = tswap16(target_filter->flags);
5761    filter->count = tswap16(target_filter->count);
5762    unlock_user(target_filter, arg, 0);
5763
5764    if (filter->count) {
5765        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5766            MAX_STRUCT_SIZE) {
5767            return -TARGET_EFAULT;
5768        }
5769
5770        target_addr = lock_user(VERIFY_READ,
5771                                arg + offsetof(struct tun_filter, addr),
5772                                filter->count * ETH_ALEN, 1);
5773        if (!target_addr) {
5774            return -TARGET_EFAULT;
5775        }
5776        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5777        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5778    }
5779
5780    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5781}
5782
5783IOCTLEntry ioctl_entries[] = {
5784#define IOCTL(cmd, access, ...) \
5785    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5786#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5787    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5788#define IOCTL_IGNORE(cmd) \
5789    { TARGET_ ## cmd, 0, #cmd },
5790#include "ioctls.h"
5791    { 0, 0, },
5792};
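
/*
 * Expansion example (editor's addition): an ioctls.h entry along the lines
 * of
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * becomes, via the IOCTL() macro above,
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 *
 * so do_ioctl() can look up the host command number and the argument thunk
 * description from the target command number alone.
 */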
5793
5794/* ??? Implement proper locking for ioctls.  */
5795/* do_ioctl() must return target values and target errnos. */
5796static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5797{
5798    const IOCTLEntry *ie;
5799    const argtype *arg_type;
5800    abi_long ret;
5801    uint8_t buf_temp[MAX_STRUCT_SIZE];
5802    int target_size;
5803    void *argptr;
5804
5805    ie = ioctl_entries;
5806    for(;;) {
5807        if (ie->target_cmd == 0) {
5808            qemu_log_mask(
5809                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5810            return -TARGET_ENOSYS;
5811        }
5812        if (ie->target_cmd == cmd)
5813            break;
5814        ie++;
5815    }
5816    arg_type = ie->arg_type;
5817    if (ie->do_ioctl) {
5818        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5819    } else if (!ie->host_cmd) {
5820        /* Some architectures define BSD ioctls in their headers
5821           that are not implemented in Linux.  */
5822        return -TARGET_ENOSYS;
5823    }
5824
5825    switch(arg_type[0]) {
5826    case TYPE_NULL:
5827        /* no argument */
5828        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5829        break;
5830    case TYPE_PTRVOID:
5831    case TYPE_INT:
5832    case TYPE_LONG:
5833    case TYPE_ULONG:
5834        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5835        break;
5836    case TYPE_PTR:
5837        arg_type++;
5838        target_size = thunk_type_size(arg_type, 0);
5839        switch(ie->access) {
5840        case IOC_R:
5841            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5842            if (!is_error(ret)) {
5843                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5844                if (!argptr)
5845                    return -TARGET_EFAULT;
5846                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5847                unlock_user(argptr, arg, target_size);
5848            }
5849            break;
5850        case IOC_W:
5851            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5852            if (!argptr)
5853                return -TARGET_EFAULT;
5854            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5855            unlock_user(argptr, arg, 0);
5856            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5857            break;
5858        default:
5859        case IOC_RW:
5860            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5861            if (!argptr)
5862                return -TARGET_EFAULT;
5863            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5864            unlock_user(argptr, arg, 0);
5865            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5866            if (!is_error(ret)) {
5867                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5868                if (!argptr)
5869                    return -TARGET_EFAULT;
5870                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5871                unlock_user(argptr, arg, target_size);
5872            }
5873            break;
5874        }
5875        break;
5876    default:
5877        qemu_log_mask(LOG_UNIMP,
5878                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5879                      (long)cmd, arg_type[0]);
5880        ret = -TARGET_ENOSYS;
5881        break;
5882    }
5883    return ret;
5884}
5885
5886static const bitmask_transtbl iflag_tbl[] = {
5887        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5888        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5889        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5890        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5891        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5892        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5893        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5894        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5895        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5896        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5897        { TARGET_IXON, TARGET_IXON, IXON, IXON },
5898        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5899        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5900        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5901        { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5902        { 0, 0, 0, 0 }
5903};
5904
5905static const bitmask_transtbl oflag_tbl[] = {
5906        { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5907        { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5908        { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5909        { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5910        { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5911        { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5912        { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5913        { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5914        { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5915        { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5916        { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5917        { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5918        { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5919        { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5920        { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5921        { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5922        { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5923        { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5924        { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5925        { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5926        { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5927        { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5928        { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5929        { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5930        { 0, 0, 0, 0 }
5931};
5932
5933static const bitmask_transtbl cflag_tbl[] = {
5934        { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5935        { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5936        { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5937        { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5938        { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5939        { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5940        { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5941        { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5942        { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5943        { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5944        { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5945        { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5946        { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5947        { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5948        { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5949        { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5950        { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5951        { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5952        { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5953        { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5954        { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5955        { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5956        { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5957        { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5958        { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5959        { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5960        { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5961        { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5962        { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5963        { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5964        { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5965        { 0, 0, 0, 0 }
5966};
5967
5968static const bitmask_transtbl lflag_tbl[] = {
5969  { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5970  { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5971  { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5972  { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5973  { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5974  { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5975  { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5976  { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5977  { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5978  { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5979  { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5980  { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5981  { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5982  { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5983  { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5984  { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5985  { 0, 0, 0, 0 }
5986};
5987
5988static void target_to_host_termios (void *dst, const void *src)
5989{
5990    struct host_termios *host = dst;
5991    const struct target_termios *target = src;
5992
5993    host->c_iflag =
5994        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5995    host->c_oflag =
5996        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5997    host->c_cflag =
5998        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5999    host->c_lflag =
6000        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
6001    host->c_line = target->c_line;
6002
6003    memset(host->c_cc, 0, sizeof(host->c_cc));
6004    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
6005    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
6006    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
6007    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
6008    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
6009    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
6010    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
6011    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
6012    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
6013    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
6014    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
6015    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
6016    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
6017    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
6018    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
6019    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
6020    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
6021}
6022
6023static void host_to_target_termios (void *dst, const void *src)
6024{
6025    struct target_termios *target = dst;
6026    const struct host_termios *host = src;
6027
6028    target->c_iflag =
6029        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
6030    target->c_oflag =
6031        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6032    target->c_cflag =
6033        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6034    target->c_lflag =
6035        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6036    target->c_line = host->c_line;
6037
6038    memset(target->c_cc, 0, sizeof(target->c_cc));
6039    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6040    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6041    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6042    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6043    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6044    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6045    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6046    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6047    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6048    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6049    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6050    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6051    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6052    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6053    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6054    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6055    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6056}
6057
6058static const StructEntry struct_termios_def = {
6059    .convert = { host_to_target_termios, target_to_host_termios },
6060    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6061    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6062    .print = print_termios,
6063};
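
/*
 * Translation example (editor's addition): each bitmask_transtbl row is
 * { target_mask, target_bits, host_mask, host_bits }, so a guest c_cflag of
 *
 *     TARGET_B9600 | TARGET_CS8 | TARGET_CREAD | TARGET_CLOCAL
 *
 * becomes the host value
 *
 *     B9600 | CS8 | CREAD | CLOCAL
 *
 * under target_to_host_bitmask(..., cflag_tbl): rows covering multi-bit
 * fields (CBAUD, CSIZE) match when the masked value equals target_bits,
 * and single-bit rows are the degenerate case of the same rule.  The
 * termios ioctls then use struct_termios_def's convert callbacks above
 * rather than the generic field-by-field thunk.
 */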
6064
6065static bitmask_transtbl mmap_flags_tbl[] = {
6066    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6067    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6068    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6069    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6070      MAP_ANONYMOUS, MAP_ANONYMOUS },
6071    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6072      MAP_GROWSDOWN, MAP_GROWSDOWN },
6073    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6074      MAP_DENYWRITE, MAP_DENYWRITE },
6075    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6076      MAP_EXECUTABLE, MAP_EXECUTABLE },
6077    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6078    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6079      MAP_NORESERVE, MAP_NORESERVE },
6080    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6081    /* MAP_STACK has been ignored by the kernel for quite some time.
6082       Recognize it for the target insofar as we do not want to pass
6083       it through to the host.  */
6084    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6085    { 0, 0, 0, 0 }
6086};
6087
6088/*
6089 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6090 *       TARGET_I386 is defined if TARGET_X86_64 is defined
6091 */
6092#if defined(TARGET_I386)
6093
6094/* NOTE: there is really only one LDT shared by all the threads */
6095static uint8_t *ldt_table;
6096
6097static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6098{
6099    int size;
6100    void *p;
6101
6102    if (!ldt_table)
6103        return 0;
6104    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6105    if (size > bytecount)
6106        size = bytecount;
6107    p = lock_user(VERIFY_WRITE, ptr, size, 0);
6108    if (!p)
6109        return -TARGET_EFAULT;
6110    /* ??? Should this be byteswapped?  */
6111    memcpy(p, ldt_table, size);
6112    unlock_user(p, ptr, size);
6113    return size;
6114}
6115
6116/* XXX: add locking support */
6117static abi_long write_ldt(CPUX86State *env,
6118                          abi_ulong ptr, unsigned long bytecount, int oldmode)
6119{
6120    struct target_modify_ldt_ldt_s ldt_info;
6121    struct target_modify_ldt_ldt_s *target_ldt_info;
6122    int seg_32bit, contents, read_exec_only, limit_in_pages;
6123    int seg_not_present, useable, lm;
6124    uint32_t *lp, entry_1, entry_2;
6125
6126    if (bytecount != sizeof(ldt_info))
6127        return -TARGET_EINVAL;
6128    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6129        return -TARGET_EFAULT;
6130    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6131    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6132    ldt_info.limit = tswap32(target_ldt_info->limit);
6133    ldt_info.flags = tswap32(target_ldt_info->flags);
6134    unlock_user_struct(target_ldt_info, ptr, 0);
6135
6136    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6137        return -TARGET_EINVAL;
6138    seg_32bit = ldt_info.flags & 1;
6139    contents = (ldt_info.flags >> 1) & 3;
6140    read_exec_only = (ldt_info.flags >> 3) & 1;
6141    limit_in_pages = (ldt_info.flags >> 4) & 1;
6142    seg_not_present = (ldt_info.flags >> 5) & 1;
6143    useable = (ldt_info.flags >> 6) & 1;
6144#ifdef TARGET_ABI32
6145    lm = 0;
6146#else
6147    lm = (ldt_info.flags >> 7) & 1;
6148#endif
6149    if (contents == 3) {
6150        if (oldmode)
6151            return -TARGET_EINVAL;
6152        if (seg_not_present == 0)
6153            return -TARGET_EINVAL;
6154    }
6155    /* allocate the LDT */
6156    if (!ldt_table) {
6157        env->ldt.base = target_mmap(0,
6158                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6159                                    PROT_READ|PROT_WRITE,
6160                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6161        if (env->ldt.base == -1)
6162            return -TARGET_ENOMEM;
6163        memset(g2h_untagged(env->ldt.base), 0,
6164               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6165        env->ldt.limit = 0xffff;
6166        ldt_table = g2h_untagged(env->ldt.base);
6167    }
6168
6169    /* NOTE: same code as Linux kernel */
6170    /* Allow LDTs to be cleared by the user. */
6171    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6172        if (oldmode ||
6173            (contents == 0              &&
6174             read_exec_only == 1        &&
6175             seg_32bit == 0             &&
6176             limit_in_pages == 0        &&
6177             seg_not_present == 1       &&
6178             useable == 0 )) {
6179            entry_1 = 0;
6180            entry_2 = 0;
6181            goto install;
6182        }
6183    }
6184
6185    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6186        (ldt_info.limit & 0x0ffff);
6187    entry_2 = (ldt_info.base_addr & 0xff000000) |
6188        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6189        (ldt_info.limit & 0xf0000) |
6190        ((read_exec_only ^ 1) << 9) |
6191        (contents << 10) |
6192        ((seg_not_present ^ 1) << 15) |
6193        (seg_32bit << 22) |
6194        (limit_in_pages << 23) |
6195        (lm << 21) |
6196        0x7000;
6197    if (!oldmode)
6198        entry_2 |= (useable << 20);
6199
6200    /* Install the new entry ...  */
6201install:
6202    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6203    lp[0] = tswap32(entry_1);
6204    lp[1] = tswap32(entry_2);
6205    return 0;
6206}
6207
6208/* specific and weird i386 syscalls */
6209static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6210                              unsigned long bytecount)
6211{
6212    abi_long ret;
6213
6214    switch (func) {
6215    case 0:
6216        ret = read_ldt(ptr, bytecount);
6217        break;
6218    case 1:
6219        ret = write_ldt(env, ptr, bytecount, 1);
6220        break;
6221    case 0x11:
6222        ret = write_ldt(env, ptr, bytecount, 0);
6223        break;
6224    default:
6225        ret = -TARGET_ENOSYS;
6226        break;
6227    }
6228    return ret;
6229}
6230
6231#if defined(TARGET_ABI32)
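    /* Emulate set_thread_area(2): build a descriptor from the guest-supplied
       structure and install it in a GDT TLS slot, picking a free slot (and
       reporting it back to the guest) when entry_number is -1. */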
6232abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6233{
6234    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6235    struct target_modify_ldt_ldt_s ldt_info;
6236    struct target_modify_ldt_ldt_s *target_ldt_info;
6237    int seg_32bit, contents, read_exec_only, limit_in_pages;
6238    int seg_not_present, useable, lm;
6239    uint32_t *lp, entry_1, entry_2;
6240    int i;
6241
6242    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6243    if (!target_ldt_info)
6244        return -TARGET_EFAULT;
6245    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6246    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6247    ldt_info.limit = tswap32(target_ldt_info->limit);
6248    ldt_info.flags = tswap32(target_ldt_info->flags);
6249    if (ldt_info.entry_number == -1) {
6250        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6251            if (gdt_table[i] == 0) {
6252                ldt_info.entry_number = i;
6253                target_ldt_info->entry_number = tswap32(i);
6254                break;
6255            }
6256        }
6257    }
6258    unlock_user_struct(target_ldt_info, ptr, 1);
6259
6260    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 
6261        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6262           return -TARGET_EINVAL;
6263    seg_32bit = ldt_info.flags & 1;
6264    contents = (ldt_info.flags >> 1) & 3;
6265    read_exec_only = (ldt_info.flags >> 3) & 1;
6266    limit_in_pages = (ldt_info.flags >> 4) & 1;
6267    seg_not_present = (ldt_info.flags >> 5) & 1;
6268    useable = (ldt_info.flags >> 6) & 1;
6269#ifdef TARGET_ABI32
6270    lm = 0;
6271#else
6272    lm = (ldt_info.flags >> 7) & 1;
6273#endif
6274
6275    if (contents == 3) {
6276        if (seg_not_present == 0)
6277            return -TARGET_EINVAL;
6278    }
6279
6280    /* NOTE: same code as Linux kernel */
6281    /* Allow LDTs to be cleared by the user. */
6282    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6283        if ((contents == 0             &&
6284             read_exec_only == 1       &&
6285             seg_32bit == 0            &&
6286             limit_in_pages == 0       &&
6287             seg_not_present == 1      &&
6288             useable == 0 )) {
6289            entry_1 = 0;
6290            entry_2 = 0;
6291            goto install;
6292        }
6293    }
6294
6295    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6296        (ldt_info.limit & 0x0ffff);
6297    entry_2 = (ldt_info.base_addr & 0xff000000) |
6298        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6299        (ldt_info.limit & 0xf0000) |
6300        ((read_exec_only ^ 1) << 9) |
6301        (contents << 10) |
6302        ((seg_not_present ^ 1) << 15) |
6303        (seg_32bit << 22) |
6304        (limit_in_pages << 23) |
6305        (useable << 20) |
6306        (lm << 21) |
6307        0x7000;
6308
6309    /* Install the new entry ...  */
6310install:
6311    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6312    lp[0] = tswap32(entry_1);
6313    lp[1] = tswap32(entry_2);
6314    return 0;
6315}
6316
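    /* Emulate get_thread_area(2): decode the descriptor in the requested GDT
       TLS slot and write the base, limit and flags back to the guest. */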
6317static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6318{
6319    struct target_modify_ldt_ldt_s *target_ldt_info;
6320    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6321    uint32_t base_addr, limit, flags;
6322    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6323    int seg_not_present, useable, lm;
6324    uint32_t *lp, entry_1, entry_2;
6325
6326    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6327    if (!target_ldt_info)
6328        return -TARGET_EFAULT;
6329    idx = tswap32(target_ldt_info->entry_number);
6330    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6331        idx > TARGET_GDT_ENTRY_TLS_MAX) {
6332        unlock_user_struct(target_ldt_info, ptr, 1);
6333        return -TARGET_EINVAL;
6334    }
6335    lp = (uint32_t *)(gdt_table + idx);
6336    entry_1 = tswap32(lp[0]);
6337    entry_2 = tswap32(lp[1]);
6338    
6339    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6340    contents = (entry_2 >> 10) & 3;
6341    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6342    seg_32bit = (entry_2 >> 22) & 1;
6343    limit_in_pages = (entry_2 >> 23) & 1;
6344    useable = (entry_2 >> 20) & 1;
6345#ifdef TARGET_ABI32
6346    lm = 0;
6347#else
6348    lm = (entry_2 >> 21) & 1;
6349#endif
6350    flags = (seg_32bit << 0) | (contents << 1) |
6351        (read_exec_only << 3) | (limit_in_pages << 4) |
6352        (seg_not_present << 5) | (useable << 6) | (lm << 7);
6353    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6354    base_addr = (entry_1 >> 16) | 
6355        (entry_2 & 0xff000000) | 
6356        ((entry_2 & 0xff) << 16);
6357    target_ldt_info->base_addr = tswapal(base_addr);
6358    target_ldt_info->limit = tswap32(limit);
6359    target_ldt_info->flags = tswap32(flags);
6360    unlock_user_struct(target_ldt_info, ptr, 1);
6361    return 0;
6362}
6363
6364abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6365{
6366    return -TARGET_ENOSYS;
6367}
6368#else
6369abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6370{
6371    abi_long ret = 0;
6372    abi_ulong val;
6373    int idx;
6374
6375    switch(code) {
6376    case TARGET_ARCH_SET_GS:
6377    case TARGET_ARCH_SET_FS:
6378        if (code == TARGET_ARCH_SET_GS)
6379            idx = R_GS;
6380        else
6381            idx = R_FS;
6382        cpu_x86_load_seg(env, idx, 0);
6383        env->segs[idx].base = addr;
6384        break;
6385    case TARGET_ARCH_GET_GS:
6386    case TARGET_ARCH_GET_FS:
6387        if (code == TARGET_ARCH_GET_GS)
6388            idx = R_GS;
6389        else
6390            idx = R_FS;
6391        val = env->segs[idx].base;
6392        if (put_user(val, addr, abi_ulong))
6393            ret = -TARGET_EFAULT;
6394        break;
6395    default:
6396        ret = -TARGET_EINVAL;
6397        break;
6398    }
6399    return ret;
6400}
6401#endif /* defined(TARGET_ABI32) */
6402
6403#endif /* defined(TARGET_I386) */
6404
6405#define NEW_STACK_SIZE 0x40000
6406
6407
6408static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6409typedef struct {
6410    CPUArchState *env;
6411    pthread_mutex_t mutex;
6412    pthread_cond_t cond;
6413    pthread_t thread;
6414    uint32_t tid;
6415    abi_ulong child_tidptr;
6416    abi_ulong parent_tidptr;
6417    sigset_t sigmask;
6418} new_thread_info;
6419
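    /* Thread start routine used by do_fork() for CLONE_VM clones: register
       the new thread with RCU/TCG, publish its TID, unblock signals, wake
       the waiting parent and then enter the guest CPU loop. */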
6420static void *clone_func(void *arg)
6421{
6422    new_thread_info *info = arg;
6423    CPUArchState *env;
6424    CPUState *cpu;
6425    TaskState *ts;
6426
6427    rcu_register_thread();
6428    tcg_register_thread();
6429    env = info->env;
6430    cpu = env_cpu(env);
6431    thread_cpu = cpu;
6432    ts = (TaskState *)cpu->opaque;
6433    info->tid = sys_gettid();
6434    task_settid(ts);
6435    if (info->child_tidptr)
6436        put_user_u32(info->tid, info->child_tidptr);
6437    if (info->parent_tidptr)
6438        put_user_u32(info->tid, info->parent_tidptr);
6439    qemu_guest_random_seed_thread_part2(cpu->random_seed);
6440    /* Enable signals.  */
6441    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6442    /* Signal to the parent that we're ready.  */
6443    pthread_mutex_lock(&info->mutex);
6444    pthread_cond_broadcast(&info->cond);
6445    pthread_mutex_unlock(&info->mutex);
6446    /* Wait until the parent has finished initializing the tls state.  */
6447    pthread_mutex_lock(&clone_lock);
6448    pthread_mutex_unlock(&clone_lock);
6449    cpu_loop(env);
6450    /* never exits */
6451    return NULL;
6452}
6453
6454/* do_fork() must return host values and target errnos (unlike most
6455   do_*() functions). */
6456static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6457                   abi_ulong parent_tidptr, target_ulong newtls,
6458                   abi_ulong child_tidptr)
6459{
6460    CPUState *cpu = env_cpu(env);
6461    int ret;
6462    TaskState *ts;
6463    CPUState *new_cpu;
6464    CPUArchState *new_env;
6465    sigset_t sigmask;
6466
6467    flags &= ~CLONE_IGNORED_FLAGS;
6468
6469    /* Emulate vfork() with fork() */
6470    if (flags & CLONE_VFORK)
6471        flags &= ~(CLONE_VFORK | CLONE_VM);
6472
6473    if (flags & CLONE_VM) {
6474        TaskState *parent_ts = (TaskState *)cpu->opaque;
6475        new_thread_info info;
6476        pthread_attr_t attr;
6477
6478        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6479            (flags & CLONE_INVALID_THREAD_FLAGS)) {
6480            return -TARGET_EINVAL;
6481        }
6482
6483        ts = g_new0(TaskState, 1);
6484        init_task_state(ts);
6485
6486        /* Grab a mutex so that thread setup appears atomic.  */
6487        pthread_mutex_lock(&clone_lock);
6488
6489        /*
6490         * If this is our first additional thread, we need to ensure we
6491         * generate code for parallel execution and flush old translations.
6492         * Do this now so that the copy gets CF_PARALLEL too.
6493         */
6494        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6495            cpu->tcg_cflags |= CF_PARALLEL;
6496            tb_flush(cpu);
6497        }
6498
6499        /* we create a new CPU instance. */
6500        new_env = cpu_copy(env);
6501        /* Init regs that differ from the parent.  */
6502        cpu_clone_regs_child(new_env, newsp, flags);
6503        cpu_clone_regs_parent(env, flags);
6504        new_cpu = env_cpu(new_env);
6505        new_cpu->opaque = ts;
6506        ts->bprm = parent_ts->bprm;
6507        ts->info = parent_ts->info;
6508        ts->signal_mask = parent_ts->signal_mask;
6509
6510        if (flags & CLONE_CHILD_CLEARTID) {
6511            ts->child_tidptr = child_tidptr;
6512        }
6513
6514        if (flags & CLONE_SETTLS) {
6515            cpu_set_tls (new_env, newtls);
6516        }
6517
6518        memset(&info, 0, sizeof(info));
6519        pthread_mutex_init(&info.mutex, NULL);
6520        pthread_mutex_lock(&info.mutex);
6521        pthread_cond_init(&info.cond, NULL);
6522        info.env = new_env;
6523        if (flags & CLONE_CHILD_SETTID) {
6524            info.child_tidptr = child_tidptr;
6525        }
6526        if (flags & CLONE_PARENT_SETTID) {
6527            info.parent_tidptr = parent_tidptr;
6528        }
6529
6530        ret = pthread_attr_init(&attr);
6531        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6532        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6533        /* It is not safe to deliver signals until the child has finished
6534           initializing, so temporarily block all signals.  */
6535        sigfillset(&sigmask);
6536        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6537        cpu->random_seed = qemu_guest_random_seed_thread_part1();
6538
6539        ret = pthread_create(&info.thread, &attr, clone_func, &info);
6540        /* TODO: Free new CPU state if thread creation failed.  */
6541
6542        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6543        pthread_attr_destroy(&attr);
6544        if (ret == 0) {
6545            /* Wait for the child to initialize.  */
6546            pthread_cond_wait(&info.cond, &info.mutex);
6547            ret = info.tid;
6548        } else {
6549            ret = -1;
6550        }
6551        pthread_mutex_unlock(&info.mutex);
6552        pthread_cond_destroy(&info.cond);
6553        pthread_mutex_destroy(&info.mutex);
6554        pthread_mutex_unlock(&clone_lock);
6555    } else {
6556        /* if there is no CLONE_VM, we consider it a fork */
6557        if (flags & CLONE_INVALID_FORK_FLAGS) {
6558            return -TARGET_EINVAL;
6559        }
6560
6561        /* We can't support custom termination signals */
6562        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6563            return -TARGET_EINVAL;
6564        }
6565
6566        if (block_signals()) {
6567            return -TARGET_ERESTARTSYS;
6568        }
6569
6570        fork_start();
6571        ret = fork();
6572        if (ret == 0) {
6573            /* Child Process.  */
6574            cpu_clone_regs_child(env, newsp, flags);
6575            fork_end(1);
6576            /* There is a race condition here.  The parent process could
6577               theoretically read the TID in the child process before the child
6578               tid is set.  This would require using either ptrace
6579               (not implemented) or having *_tidptr point at a shared memory
6580               mapping.  We can't repeat the spinlock hack used above because
6581               the child process gets its own copy of the lock.  */
6582            if (flags & CLONE_CHILD_SETTID)
6583                put_user_u32(sys_gettid(), child_tidptr);
6584            if (flags & CLONE_PARENT_SETTID)
6585                put_user_u32(sys_gettid(), parent_tidptr);
6586            ts = (TaskState *)cpu->opaque;
6587            if (flags & CLONE_SETTLS)
6588                cpu_set_tls (env, newtls);
6589            if (flags & CLONE_CHILD_CLEARTID)
6590                ts->child_tidptr = child_tidptr;
6591        } else {
6592            cpu_clone_regs_parent(env, flags);
6593            fork_end(0);
6594        }
6595    }
6596    return ret;
6597}
6598
6599/* warning: doesn't handle Linux-specific flags... */
6600static int target_to_host_fcntl_cmd(int cmd)
6601{
6602    int ret;
6603
6604    switch(cmd) {
6605    case TARGET_F_DUPFD:
6606    case TARGET_F_GETFD:
6607    case TARGET_F_SETFD:
6608    case TARGET_F_GETFL:
6609    case TARGET_F_SETFL:
6610    case TARGET_F_OFD_GETLK:
6611    case TARGET_F_OFD_SETLK:
6612    case TARGET_F_OFD_SETLKW:
6613        ret = cmd;
6614        break;
6615    case TARGET_F_GETLK:
6616        ret = F_GETLK64;
6617        break;
6618    case TARGET_F_SETLK:
6619        ret = F_SETLK64;
6620        break;
6621    case TARGET_F_SETLKW:
6622        ret = F_SETLKW64;
6623        break;
6624    case TARGET_F_GETOWN:
6625        ret = F_GETOWN;
6626        break;
6627    case TARGET_F_SETOWN:
6628        ret = F_SETOWN;
6629        break;
6630    case TARGET_F_GETSIG:
6631        ret = F_GETSIG;
6632        break;
6633    case TARGET_F_SETSIG:
6634        ret = F_SETSIG;
6635        break;
6636#if TARGET_ABI_BITS == 32
6637    case TARGET_F_GETLK64:
6638        ret = F_GETLK64;
6639        break;
6640    case TARGET_F_SETLK64:
6641        ret = F_SETLK64;
6642        break;
6643    case TARGET_F_SETLKW64:
6644        ret = F_SETLKW64;
6645        break;
6646#endif
6647    case TARGET_F_SETLEASE:
6648        ret = F_SETLEASE;
6649        break;
6650    case TARGET_F_GETLEASE:
6651        ret = F_GETLEASE;
6652        break;
6653#ifdef F_DUPFD_CLOEXEC
6654    case TARGET_F_DUPFD_CLOEXEC:
6655        ret = F_DUPFD_CLOEXEC;
6656        break;
6657#endif
6658    case TARGET_F_NOTIFY:
6659        ret = F_NOTIFY;
6660        break;
6661#ifdef F_GETOWN_EX
6662    case TARGET_F_GETOWN_EX:
6663        ret = F_GETOWN_EX;
6664        break;
6665#endif
6666#ifdef F_SETOWN_EX
6667    case TARGET_F_SETOWN_EX:
6668        ret = F_SETOWN_EX;
6669        break;
6670#endif
6671#ifdef F_SETPIPE_SZ
6672    case TARGET_F_SETPIPE_SZ:
6673        ret = F_SETPIPE_SZ;
6674        break;
6675    case TARGET_F_GETPIPE_SZ:
6676        ret = F_GETPIPE_SZ;
6677        break;
6678#endif
6679#ifdef F_ADD_SEALS
6680    case TARGET_F_ADD_SEALS:
6681        ret = F_ADD_SEALS;
6682        break;
6683    case TARGET_F_GET_SEALS:
6684        ret = F_GET_SEALS;
6685        break;
6686#endif
6687    default:
6688        ret = -TARGET_EINVAL;
6689        break;
6690    }
6691
6692#if defined(__powerpc64__)
6693    /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6694     * 14, which are not supported by the kernel. The glibc fcntl call
6695     * actually adjusts them to 5, 6 and 7 before making the syscall(). Since
6696     * we make the syscall directly, adjust to what the kernel supports.
6697     */
6698    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6699        ret -= F_GETLK64 - 5;
6700    }
6701#endif
6702
6703    return ret;
6704}
6705
6706#define FLOCK_TRANSTBL \
6707    switch (type) { \
6708    TRANSTBL_CONVERT(F_RDLCK); \
6709    TRANSTBL_CONVERT(F_WRLCK); \
6710    TRANSTBL_CONVERT(F_UNLCK); \
6711    }
6712
6713static int target_to_host_flock(int type)
6714{
6715#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6716    FLOCK_TRANSTBL
6717#undef  TRANSTBL_CONVERT
6718    return -TARGET_EINVAL;
6719}
6720
6721static int host_to_target_flock(int type)
6722{
6723#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6724    FLOCK_TRANSTBL
6725#undef  TRANSTBL_CONVERT
6726    /* If we don't know how to convert the value coming
6727     * from the host, copy it to the target field as-is.
6728     */
6729    return type;
6730}
6731
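    /* Helpers converting 'struct flock' between the target layouts and the
       host 'struct flock64' used by the fcntl locking commands below. */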
6732static inline abi_long copy_from_user_flock(struct flock64 *fl,
6733                                            abi_ulong target_flock_addr)
6734{
6735    struct target_flock *target_fl;
6736    int l_type;
6737
6738    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6739        return -TARGET_EFAULT;
6740    }
6741
6742    __get_user(l_type, &target_fl->l_type);
6743    l_type = target_to_host_flock(l_type);
6744    if (l_type < 0) {
6745        return l_type;
6746    }
6747    fl->l_type = l_type;
6748    __get_user(fl->l_whence, &target_fl->l_whence);
6749    __get_user(fl->l_start, &target_fl->l_start);
6750    __get_user(fl->l_len, &target_fl->l_len);
6751    __get_user(fl->l_pid, &target_fl->l_pid);
6752    unlock_user_struct(target_fl, target_flock_addr, 0);
6753    return 0;
6754}
6755
6756static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6757                                          const struct flock64 *fl)
6758{
6759    struct target_flock *target_fl;
6760    short l_type;
6761
6762    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6763        return -TARGET_EFAULT;
6764    }
6765
6766    l_type = host_to_target_flock(fl->l_type);
6767    __put_user(l_type, &target_fl->l_type);
6768    __put_user(fl->l_whence, &target_fl->l_whence);
6769    __put_user(fl->l_start, &target_fl->l_start);
6770    __put_user(fl->l_len, &target_fl->l_len);
6771    __put_user(fl->l_pid, &target_fl->l_pid);
6772    unlock_user_struct(target_fl, target_flock_addr, 1);
6773    return 0;
6774}
6775
6776typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6777typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6778
6779#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6780static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6781                                                   abi_ulong target_flock_addr)
6782{
6783    struct target_oabi_flock64 *target_fl;
6784    int l_type;
6785
6786    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6787        return -TARGET_EFAULT;
6788    }
6789
6790    __get_user(l_type, &target_fl->l_type);
6791    l_type = target_to_host_flock(l_type);
6792    if (l_type < 0) {
6793        return l_type;
6794    }
6795    fl->l_type = l_type;
6796    __get_user(fl->l_whence, &target_fl->l_whence);
6797    __get_user(fl->l_start, &target_fl->l_start);
6798    __get_user(fl->l_len, &target_fl->l_len);
6799    __get_user(fl->l_pid, &target_fl->l_pid);
6800    unlock_user_struct(target_fl, target_flock_addr, 0);
6801    return 0;
6802}
6803
6804static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6805                                                 const struct flock64 *fl)
6806{
6807    struct target_oabi_flock64 *target_fl;
6808    short l_type;
6809
6810    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6811        return -TARGET_EFAULT;
6812    }
6813
6814    l_type = host_to_target_flock(fl->l_type);
6815    __put_user(l_type, &target_fl->l_type);
6816    __put_user(fl->l_whence, &target_fl->l_whence);
6817    __put_user(fl->l_start, &target_fl->l_start);
6818    __put_user(fl->l_len, &target_fl->l_len);
6819    __put_user(fl->l_pid, &target_fl->l_pid);
6820    unlock_user_struct(target_fl, target_flock_addr, 1);
6821    return 0;
6822}
6823#endif
6824
6825static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6826                                              abi_ulong target_flock_addr)
6827{
6828    struct target_flock64 *target_fl;
6829    int l_type;
6830
6831    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6832        return -TARGET_EFAULT;
6833    }
6834
6835    __get_user(l_type, &target_fl->l_type);
6836    l_type = target_to_host_flock(l_type);
6837    if (l_type < 0) {
6838        return l_type;
6839    }
6840    fl->l_type = l_type;
6841    __get_user(fl->l_whence, &target_fl->l_whence);
6842    __get_user(fl->l_start, &target_fl->l_start);
6843    __get_user(fl->l_len, &target_fl->l_len);
6844    __get_user(fl->l_pid, &target_fl->l_pid);
6845    unlock_user_struct(target_fl, target_flock_addr, 0);
6846    return 0;
6847}
6848
6849static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6850                                            const struct flock64 *fl)
6851{
6852    struct target_flock64 *target_fl;
6853    short l_type;
6854
6855    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6856        return -TARGET_EFAULT;
6857    }
6858
6859    l_type = host_to_target_flock(fl->l_type);
6860    __put_user(l_type, &target_fl->l_type);
6861    __put_user(fl->l_whence, &target_fl->l_whence);
6862    __put_user(fl->l_start, &target_fl->l_start);
6863    __put_user(fl->l_len, &target_fl->l_len);
6864    __put_user(fl->l_pid, &target_fl->l_pid);
6865    unlock_user_struct(target_fl, target_flock_addr, 1);
6866    return 0;
6867}
6868
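    /* Implement fcntl(2): translate the command and any flock, f_owner_ex,
       flag or signal arguments to host form, call the host fcntl and
       convert the result back for the guest. */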
6869static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6870{
6871    struct flock64 fl64;
6872#ifdef F_GETOWN_EX
6873    struct f_owner_ex fox;
6874    struct target_f_owner_ex *target_fox;
6875#endif
6876    abi_long ret;
6877    int host_cmd = target_to_host_fcntl_cmd(cmd);
6878
6879    if (host_cmd == -TARGET_EINVAL)
6880        return host_cmd;
6881
6882    switch(cmd) {
6883    case TARGET_F_GETLK:
6884        ret = copy_from_user_flock(&fl64, arg);
6885        if (ret) {
6886            return ret;
6887        }
6888        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6889        if (ret == 0) {
6890            ret = copy_to_user_flock(arg, &fl64);
6891        }
6892        break;
6893
6894    case TARGET_F_SETLK:
6895    case TARGET_F_SETLKW:
6896        ret = copy_from_user_flock(&fl64, arg);
6897        if (ret) {
6898            return ret;
6899        }
6900        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6901        break;
6902
6903    case TARGET_F_GETLK64:
6904    case TARGET_F_OFD_GETLK:
6905        ret = copy_from_user_flock64(&fl64, arg);
6906        if (ret) {
6907            return ret;
6908        }
6909        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6910        if (ret == 0) {
6911            ret = copy_to_user_flock64(arg, &fl64);
6912        }
6913        break;
6914    case TARGET_F_SETLK64:
6915    case TARGET_F_SETLKW64:
6916    case TARGET_F_OFD_SETLK:
6917    case TARGET_F_OFD_SETLKW:
6918        ret = copy_from_user_flock64(&fl64, arg);
6919        if (ret) {
6920            return ret;
6921        }
6922        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6923        break;
6924
6925    case TARGET_F_GETFL:
6926        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6927        if (ret >= 0) {
6928            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6929        }
6930        break;
6931
6932    case TARGET_F_SETFL:
6933        ret = get_errno(safe_fcntl(fd, host_cmd,
6934                                   target_to_host_bitmask(arg,
6935                                                          fcntl_flags_tbl)));
6936        break;
6937
6938#ifdef F_GETOWN_EX
6939    case TARGET_F_GETOWN_EX:
6940        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6941        if (ret >= 0) {
6942            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6943                return -TARGET_EFAULT;
6944            target_fox->type = tswap32(fox.type);
6945            target_fox->pid = tswap32(fox.pid);
6946            unlock_user_struct(target_fox, arg, 1);
6947        }
6948        break;
6949#endif
6950
6951#ifdef F_SETOWN_EX
6952    case TARGET_F_SETOWN_EX:
6953        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6954            return -TARGET_EFAULT;
6955        fox.type = tswap32(target_fox->type);
6956        fox.pid = tswap32(target_fox->pid);
6957        unlock_user_struct(target_fox, arg, 0);
6958        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6959        break;
6960#endif
6961
6962    case TARGET_F_SETSIG:
6963        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6964        break;
6965
6966    case TARGET_F_GETSIG:
6967        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6968        break;
6969
6970    case TARGET_F_SETOWN:
6971    case TARGET_F_GETOWN:
6972    case TARGET_F_SETLEASE:
6973    case TARGET_F_GETLEASE:
6974    case TARGET_F_SETPIPE_SZ:
6975    case TARGET_F_GETPIPE_SZ:
6976    case TARGET_F_ADD_SEALS:
6977    case TARGET_F_GET_SEALS:
6978        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6979        break;
6980
6981    default:
6982        ret = get_errno(safe_fcntl(fd, cmd, arg));
6983        break;
6984    }
6985    return ret;
6986}
6987
6988#ifdef USE_UID16
6989
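    /* Helpers for the 16-bit UID/GID ("low") syscall variants: clamp 32-bit
       IDs into the legacy 16-bit range and widen them again, keeping the
       special -1 value intact. */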
6990static inline int high2lowuid(int uid)
6991{
6992    if (uid > 65535)
6993        return 65534;
6994    else
6995        return uid;
6996}
6997
6998static inline int high2lowgid(int gid)
6999{
7000    if (gid > 65535)
7001        return 65534;
7002    else
7003        return gid;
7004}
7005
7006static inline int low2highuid(int uid)
7007{
7008    if ((int16_t)uid == -1)
7009        return -1;
7010    else
7011        return uid;
7012}
7013
7014static inline int low2highgid(int gid)
7015{
7016    if ((int16_t)gid == -1)
7017        return -1;
7018    else
7019        return gid;
7020}
7021static inline int tswapid(int id)
7022{
7023    return tswap16(id);
7024}
7025
7026#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7027
7028#else /* !USE_UID16 */
7029static inline int high2lowuid(int uid)
7030{
7031    return uid;
7032}
7033static inline int high2lowgid(int gid)
7034{
7035    return gid;
7036}
7037static inline int low2highuid(int uid)
7038{
7039    return uid;
7040}
7041static inline int low2highgid(int gid)
7042{
7043    return gid;
7044}
7045static inline int tswapid(int id)
7046{
7047    return tswap32(id);
7048}
7049
7050#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7051
7052#endif /* USE_UID16 */
7053
7054/* We must do direct syscalls for setting UID/GID, because we want to
7055 * implement the Linux system call semantics of "change only for this thread",
7056 * not the libc/POSIX semantics of "change for all threads in process".
7057 * (See http://ewontfix.com/17/ for more details.)
7058 * We use the 32-bit version of the syscalls if present; if it is not
7059 * then either the host architecture supports 32-bit UIDs natively with
7060 * the standard syscall, or the 16-bit UID is the best we can do.
7061 */
7062#ifdef __NR_setuid32
7063#define __NR_sys_setuid __NR_setuid32
7064#else
7065#define __NR_sys_setuid __NR_setuid
7066#endif
7067#ifdef __NR_setgid32
7068#define __NR_sys_setgid __NR_setgid32
7069#else
7070#define __NR_sys_setgid __NR_setgid
7071#endif
7072#ifdef __NR_setresuid32
7073#define __NR_sys_setresuid __NR_setresuid32
7074#else
7075#define __NR_sys_setresuid __NR_setresuid
7076#endif
7077#ifdef __NR_setresgid32
7078#define __NR_sys_setresgid __NR_setresgid32
7079#else
7080#define __NR_sys_setresgid __NR_setresgid
7081#endif
7082
7083_syscall1(int, sys_setuid, uid_t, uid)
7084_syscall1(int, sys_setgid, gid_t, gid)
7085_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7086_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7087
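    /* One-time initialization of the syscall layer: register the thunk
       struct descriptions, build the target-to-host errno table, patch
       ioctl numbers whose size field is the "all ones" placeholder, and
       sanity-check the ioctl list when host and target share the same
       architecture. */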
7088void syscall_init(void)
7089{
7090    IOCTLEntry *ie;
7091    const argtype *arg_type;
7092    int size;
7093    int i;
7094
7095    thunk_init(STRUCT_MAX);
7096
7097#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7098#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7099#include "syscall_types.h"
7100#undef STRUCT
7101#undef STRUCT_SPECIAL
7102
7103    /* Build target_to_host_errno_table[] table from
7104     * host_to_target_errno_table[]. */
7105    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7106        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7107    }
7108
7109    /* We patch the ioctl size if necessary.  We rely on the fact that
7110       no ioctl has all the bits at '1' in the size field. */
7111    ie = ioctl_entries;
7112    while (ie->target_cmd != 0) {
7113        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7114            TARGET_IOC_SIZEMASK) {
7115            arg_type = ie->arg_type;
7116            if (arg_type[0] != TYPE_PTR) {
7117                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7118                        ie->target_cmd);
7119                exit(1);
7120            }
7121            arg_type++;
7122            size = thunk_type_size(arg_type, 0);
7123            ie->target_cmd = (ie->target_cmd &
7124                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7125                (size << TARGET_IOC_SIZESHIFT);
7126        }
7127
7128        /* automatic consistency check if same arch */
7129#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7130    (defined(__x86_64__) && defined(TARGET_X86_64))
7131        if (unlikely(ie->target_cmd != ie->host_cmd)) {
7132            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7133                    ie->name, ie->target_cmd, ie->host_cmd);
7134        }
7135#endif
7136        ie++;
7137    }
7138}
7139
7140#ifdef TARGET_NR_truncate64
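    /* truncate64/ftruncate64 pass a 64-bit offset in two registers; on ABIs
       where register pairs must be aligned (see regpairs_aligned()) the pair
       is shifted by one argument slot, so pick the right halves before
       reassembling the offset. */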
7141static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7142                                         abi_long arg2,
7143                                         abi_long arg3,
7144                                         abi_long arg4)
7145{
7146    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7147        arg2 = arg3;
7148        arg3 = arg4;
7149    }
7150    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7151}
7152#endif
7153
7154#ifdef TARGET_NR_ftruncate64
7155static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7156                                          abi_long arg2,
7157                                          abi_long arg3,
7158                                          abi_long arg4)
7159{
7160    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7161        arg2 = arg3;
7162        arg3 = arg4;
7163    }
7164    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7165}
7166#endif
7167
7168#if defined(TARGET_NR_timer_settime) || \
7169    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7170static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7171                                                 abi_ulong target_addr)
7172{
7173    if (target_to_host_timespec(&host_its->it_interval, target_addr +
7174                                offsetof(struct target_itimerspec,
7175                                         it_interval)) ||
7176        target_to_host_timespec(&host_its->it_value, target_addr +
7177                                offsetof(struct target_itimerspec,
7178                                         it_value))) {
7179        return -TARGET_EFAULT;
7180    }
7181
7182    return 0;
7183}
7184#endif
7185
7186#if defined(TARGET_NR_timer_settime64) || \
7187    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7188static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7189                                                   abi_ulong target_addr)
7190{
7191    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7192                                  offsetof(struct target__kernel_itimerspec,
7193                                           it_interval)) ||
7194        target_to_host_timespec64(&host_its->it_value, target_addr +
7195                                  offsetof(struct target__kernel_itimerspec,
7196                                           it_value))) {
7197        return -TARGET_EFAULT;
7198    }
7199
7200    return 0;
7201}
7202#endif
7203
7204#if ((defined(TARGET_NR_timerfd_gettime) || \
7205      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7206      defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7207static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7208                                                 struct itimerspec *host_its)
7209{
7210    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7211                                                       it_interval),
7212                                &host_its->it_interval) ||
7213        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7214                                                       it_value),
7215                                &host_its->it_value)) {
7216        return -TARGET_EFAULT;
7217    }
7218    return 0;
7219}
7220#endif
7221
7222#if ((defined(TARGET_NR_timerfd_gettime64) || \
7223      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7224      defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7225static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7226                                                   struct itimerspec *host_its)
7227{
7228    if (host_to_target_timespec64(target_addr +
7229                                  offsetof(struct target__kernel_itimerspec,
7230                                           it_interval),
7231                                  &host_its->it_interval) ||
7232        host_to_target_timespec64(target_addr +
7233                                  offsetof(struct target__kernel_itimerspec,
7234                                           it_value),
7235                                  &host_its->it_value)) {
7236        return -TARGET_EFAULT;
7237    }
7238    return 0;
7239}
7240#endif
7241
7242#if defined(TARGET_NR_adjtimex) || \
7243    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
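    /* Field-by-field conversion of 'struct timex' between target and host,
       used by adjtimex() and clock_adjtime(). */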
7244static inline abi_long target_to_host_timex(struct timex *host_tx,
7245                                            abi_long target_addr)
7246{
7247    struct target_timex *target_tx;
7248
7249    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7250        return -TARGET_EFAULT;
7251    }
7252
7253    __get_user(host_tx->modes, &target_tx->modes);
7254    __get_user(host_tx->offset, &target_tx->offset);
7255    __get_user(host_tx->freq, &target_tx->freq);
7256    __get_user(host_tx->maxerror, &target_tx->maxerror);
7257    __get_user(host_tx->esterror, &target_tx->esterror);
7258    __get_user(host_tx->status, &target_tx->status);
7259    __get_user(host_tx->constant, &target_tx->constant);
7260    __get_user(host_tx->precision, &target_tx->precision);
7261    __get_user(host_tx->tolerance, &target_tx->tolerance);
7262    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7263    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7264    __get_user(host_tx->tick, &target_tx->tick);
7265    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7266    __get_user(host_tx->jitter, &target_tx->jitter);
7267    __get_user(host_tx->shift, &target_tx->shift);
7268    __get_user(host_tx->stabil, &target_tx->stabil);
7269    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7270    __get_user(host_tx->calcnt, &target_tx->calcnt);
7271    __get_user(host_tx->errcnt, &target_tx->errcnt);
7272    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7273    __get_user(host_tx->tai, &target_tx->tai);
7274
7275    unlock_user_struct(target_tx, target_addr, 0);
7276    return 0;
7277}
7278
7279static inline abi_long host_to_target_timex(abi_long target_addr,
7280                                            struct timex *host_tx)
7281{
7282    struct target_timex *target_tx;
7283
7284    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7285        return -TARGET_EFAULT;
7286    }
7287
7288    __put_user(host_tx->modes, &target_tx->modes);
7289    __put_user(host_tx->offset, &target_tx->offset);
7290    __put_user(host_tx->freq, &target_tx->freq);
7291    __put_user(host_tx->maxerror, &target_tx->maxerror);
7292    __put_user(host_tx->esterror, &target_tx->esterror);
7293    __put_user(host_tx->status, &target_tx->status);
7294    __put_user(host_tx->constant, &target_tx->constant);
7295    __put_user(host_tx->precision, &target_tx->precision);
7296    __put_user(host_tx->tolerance, &target_tx->tolerance);
7297    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7298    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7299    __put_user(host_tx->tick, &target_tx->tick);
7300    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7301    __put_user(host_tx->jitter, &target_tx->jitter);
7302    __put_user(host_tx->shift, &target_tx->shift);
7303    __put_user(host_tx->stabil, &target_tx->stabil);
7304    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7305    __put_user(host_tx->calcnt, &target_tx->calcnt);
7306    __put_user(host_tx->errcnt, &target_tx->errcnt);
7307    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7308    __put_user(host_tx->tai, &target_tx->tai);
7309
7310    unlock_user_struct(target_tx, target_addr, 1);
7311    return 0;
7312}
7313#endif
7314
7315
7316#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7317static inline abi_long target_to_host_timex64(struct timex *host_tx,
7318                                              abi_long target_addr)
7319{
7320    struct target__kernel_timex *target_tx;
7321
7322    if (copy_from_user_timeval64(&host_tx->time, target_addr +
7323                                 offsetof(struct target__kernel_timex,
7324                                          time))) {
7325        return -TARGET_EFAULT;
7326    }
7327
7328    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7329        return -TARGET_EFAULT;
7330    }
7331
7332    __get_user(host_tx->modes, &target_tx->modes);
7333    __get_user(host_tx->offset, &target_tx->offset);
7334    __get_user(host_tx->freq, &target_tx->freq);
7335    __get_user(host_tx->maxerror, &target_tx->maxerror);
7336    __get_user(host_tx->esterror, &target_tx->esterror);
7337    __get_user(host_tx->status, &target_tx->status);
7338    __get_user(host_tx->constant, &target_tx->constant);
7339    __get_user(host_tx->precision, &target_tx->precision);
7340    __get_user(host_tx->tolerance, &target_tx->tolerance);
7341    __get_user(host_tx->tick, &target_tx->tick);
7342    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7343    __get_user(host_tx->jitter, &target_tx->jitter);
7344    __get_user(host_tx->shift, &target_tx->shift);
7345    __get_user(host_tx->stabil, &target_tx->stabil);
7346    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7347    __get_user(host_tx->calcnt, &target_tx->calcnt);
7348    __get_user(host_tx->errcnt, &target_tx->errcnt);
7349    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7350    __get_user(host_tx->tai, &target_tx->tai);
7351
7352    unlock_user_struct(target_tx, target_addr, 0);
7353    return 0;
7354}
7355
7356static inline abi_long host_to_target_timex64(abi_long target_addr,
7357                                              struct timex *host_tx)
7358{
7359    struct target__kernel_timex *target_tx;
7360
7361    if (copy_to_user_timeval64(target_addr +
7362                               offsetof(struct target__kernel_timex, time),
7363                               &host_tx->time)) {
7364        return -TARGET_EFAULT;
7365    }
7366
7367    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7368        return -TARGET_EFAULT;
7369    }
7370
7371    __put_user(host_tx->modes, &target_tx->modes);
7372    __put_user(host_tx->offset, &target_tx->offset);
7373    __put_user(host_tx->freq, &target_tx->freq);
7374    __put_user(host_tx->maxerror, &target_tx->maxerror);
7375    __put_user(host_tx->esterror, &target_tx->esterror);
7376    __put_user(host_tx->status, &target_tx->status);
7377    __put_user(host_tx->constant, &target_tx->constant);
7378    __put_user(host_tx->precision, &target_tx->precision);
7379    __put_user(host_tx->tolerance, &target_tx->tolerance);
7380    __put_user(host_tx->tick, &target_tx->tick);
7381    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7382    __put_user(host_tx->jitter, &target_tx->jitter);
7383    __put_user(host_tx->shift, &target_tx->shift);
7384    __put_user(host_tx->stabil, &target_tx->stabil);
7385    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7386    __put_user(host_tx->calcnt, &target_tx->calcnt);
7387    __put_user(host_tx->errcnt, &target_tx->errcnt);
7388    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7389    __put_user(host_tx->tai, &target_tx->tai);
7390
7391    unlock_user_struct(target_tx, target_addr, 1);
7392    return 0;
7393}
7394#endif
7395
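    /* Convert a target struct sigevent (as used by timer_create() and
       friends) to the host representation, translating the signal number
       and the sigev_value union. */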
7396static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7397                                               abi_ulong target_addr)
7398{
7399    struct target_sigevent *target_sevp;
7400
7401    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7402        return -TARGET_EFAULT;
7403    }
7404
7405    /* This union is awkward on 64 bit systems because it has a 32 bit
7406     * integer and a pointer in it; we follow the conversion approach
7407     * used for handling sigval types in signal.c so the guest should get
7408     * the correct value back even if we did a 64 bit byteswap and it's
7409     * using the 32 bit integer.
7410     */
7411    host_sevp->sigev_value.sival_ptr =
7412        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7413    host_sevp->sigev_signo =
7414        target_to_host_signal(tswap32(target_sevp->sigev_signo));
7415    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7416    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7417
7418    unlock_user_struct(target_sevp, target_addr, 1);
7419    return 0;
7420}
7421
7422#if defined(TARGET_NR_mlockall)
7423static inline int target_to_host_mlockall_arg(int arg)
7424{
7425    int result = 0;
7426
7427    if (arg & TARGET_MCL_CURRENT) {
7428        result |= MCL_CURRENT;
7429    }
7430    if (arg & TARGET_MCL_FUTURE) {
7431        result |= MCL_FUTURE;
7432    }
7433#ifdef MCL_ONFAULT
7434    if (arg & TARGET_MCL_ONFAULT) {
7435        result |= MCL_ONFAULT;
7436    }
7437#endif
7438
7439    return result;
7440}
7441#endif
7442
7443#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7444     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7445     defined(TARGET_NR_newfstatat))
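    /* Write a host 'struct stat' out to the guest in its stat64 layout,
       with a special case for the ARM EABI variant of the structure. */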
7446static inline abi_long host_to_target_stat64(void *cpu_env,
7447                                             abi_ulong target_addr,
7448                                             struct stat *host_st)
7449{
7450#if defined(TARGET_ARM) && defined(TARGET_ABI32)
7451    if (((CPUARMState *)cpu_env)->eabi) {
7452        struct target_eabi_stat64 *target_st;
7453
7454        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7455            return -TARGET_EFAULT;
7456        memset(target_st, 0, sizeof(struct target_eabi_stat64));
7457        __put_user(host_st->st_dev, &target_st->st_dev);
7458        __put_user(host_st->st_ino, &target_st->st_ino);
7459#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7460        __put_user(host_st->st_ino, &target_st->__st_ino);
7461#endif
7462        __put_user(host_st->st_mode, &target_st->st_mode);
7463        __put_user(host_st->st_nlink, &target_st->st_nlink);
7464        __put_user(host_st->st_uid, &target_st->st_uid);
7465        __put_user(host_st->st_gid, &target_st->st_gid);
7466        __put_user(host_st->st_rdev, &target_st->st_rdev);
7467        __put_user(host_st->st_size, &target_st->st_size);
7468        __put_user(host_st->st_blksize, &target_st->st_blksize);
7469        __put_user(host_st->st_blocks, &target_st->st_blocks);
7470        __put_user(host_st->st_atime, &target_st->target_st_atime);
7471        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7472        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7473#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7474        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7475        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7476        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7477#endif
7478        unlock_user_struct(target_st, target_addr, 1);
7479    } else
7480#endif
7481    {
7482#if defined(TARGET_HAS_STRUCT_STAT64)
7483        struct target_stat64 *target_st;
7484#else
7485        struct target_stat *target_st;
7486#endif
7487
7488        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7489            return -TARGET_EFAULT;
7490        memset(target_st, 0, sizeof(*target_st));
7491        __put_user(host_st->st_dev, &target_st->st_dev);
7492        __put_user(host_st->st_ino, &target_st->st_ino);
7493#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7494        __put_user(host_st->st_ino, &target_st->__st_ino);
7495#endif
7496        __put_user(host_st->st_mode, &target_st->st_mode);
7497        __put_user(host_st->st_nlink, &target_st->st_nlink);
7498        __put_user(host_st->st_uid, &target_st->st_uid);
7499        __put_user(host_st->st_gid, &target_st->st_gid);
7500        __put_user(host_st->st_rdev, &target_st->st_rdev);
7501        /* XXX: better use of kernel struct */
7502        __put_user(host_st->st_size, &target_st->st_size);
7503        __put_user(host_st->st_blksize, &target_st->st_blksize);
7504        __put_user(host_st->st_blocks, &target_st->st_blocks);
7505        __put_user(host_st->st_atime, &target_st->target_st_atime);
7506        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7507        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7508#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7509        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7510        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7511        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7512#endif
7513        unlock_user_struct(target_st, target_addr, 1);
7514    }
7515
7516    return 0;
7517}
7518#endif
7519
7520#if defined(TARGET_NR_statx) && defined(__NR_statx)
7521static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7522                                            abi_ulong target_addr)
7523{
7524    struct target_statx *target_stx;
7525
7526    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7527        return -TARGET_EFAULT;
7528    }
7529    memset(target_stx, 0, sizeof(*target_stx));
7530
7531    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7532    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7533    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7534    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7535    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7536    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7537    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7538    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7539    __put_user(host_stx->stx_size, &target_stx->stx_size);
7540    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7541    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7542    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7543    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7544    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7545    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7546    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7547    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7548    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7549    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7550    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7551    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7552    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7553    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7554
7555    unlock_user_struct(target_stx, target_addr, 1);
7556
7557    return 0;
7558}
7559#endif
7560
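    /* Call the host futex syscall, preferring the _time64 variant on 32-bit
       hosts when the host timespec carries a 64-bit tv_sec. */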
7561static int do_sys_futex(int *uaddr, int op, int val,
7562                         const struct timespec *timeout, int *uaddr2,
7563                         int val3)
7564{
7565#if HOST_LONG_BITS == 64
7566#if defined(__NR_futex)
7567    /* 64-bit hosts have a 64-bit time_t, so there is no _time64 variant */
7568    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7569
7570#endif
7571#else /* HOST_LONG_BITS == 64 */
7572#if defined(__NR_futex_time64)
7573    if (sizeof(timeout->tv_sec) == 8) {
7574        /* _time64 function on 32bit arch */
7575        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7576    }
7577#endif
7578#if defined(__NR_futex)
7579    /* old function on 32bit arch */
7580    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7581#endif
7582#endif /* HOST_LONG_BITS == 64 */
7583    g_assert_not_reached();
7584}
7585
7586static int do_safe_futex(int *uaddr, int op, int val,
7587                         const struct timespec *timeout, int *uaddr2,
7588                         int val3)
7589{
7590#if HOST_LONG_BITS == 64
7591#if defined(__NR_futex)
7592    /* 64-bit hosts have a 64-bit time_t, so there is no _time64 variant */
7593    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7594#endif
7595#else /* HOST_LONG_BITS == 64 */
7596#if defined(__NR_futex_time64)
7597    if (sizeof(timeout->tv_sec) == 8) {
7598        /* _time64 function on 32bit arch */
7599        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7600                                           val3));
7601    }
7602#endif
7603#if defined(__NR_futex)
7604    /* old function on 32bit arch */
7605    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7606#endif
7607#endif /* HOST_LONG_BITS == 64 */
7608    return -TARGET_ENOSYS;
7609}
7610
7611/* ??? Using host futex calls even when target atomic operations
7612   are not really atomic probably breaks things.  However, implementing
7613   futexes locally would make futexes shared between multiple processes
7614   tricky.  In any case they're probably useless, because guest atomic
7615   operations won't work either.  */
7616#if defined(TARGET_NR_futex)
7617static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7618                    target_ulong timeout, target_ulong uaddr2, int val3)
7619{
7620    struct timespec ts, *pts;
7621    int base_op;
7622
7623    /* ??? We assume FUTEX_* constants are the same on both host
7624       and target.  */
7625#ifdef FUTEX_CMD_MASK
7626    base_op = op & FUTEX_CMD_MASK;
7627#else
7628    base_op = op;
7629#endif
7630    switch (base_op) {
7631    case FUTEX_WAIT:
7632    case FUTEX_WAIT_BITSET:
7633        if (timeout) {
7634            pts = &ts;
7635            target_to_host_timespec(pts, timeout);
7636        } else {
7637            pts = NULL;
7638        }
7639        return do_safe_futex(g2h(cpu, uaddr),
7640                             op, tswap32(val), pts, NULL, val3);
7641    case FUTEX_WAKE:
7642        return do_safe_futex(g2h(cpu, uaddr),
7643                             op, val, NULL, NULL, 0);
7644    case FUTEX_FD:
7645        return do_safe_futex(g2h(cpu, uaddr),
7646                             op, val, NULL, NULL, 0);
7647    case FUTEX_REQUEUE:
7648    case FUTEX_CMP_REQUEUE:
7649    case FUTEX_WAKE_OP:
7650        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7651           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7652           But the prototype takes a `struct timespec *'; insert casts
7653           to satisfy the compiler.  We do not need to tswap TIMEOUT
7654           since it's not compared to guest memory.  */
7655        pts = (struct timespec *)(uintptr_t) timeout;
7656        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7657                             (base_op == FUTEX_CMP_REQUEUE
7658                              ? tswap32(val3) : val3));
7659    default:
7660        return -TARGET_ENOSYS;
7661    }
7662}
7663#endif
7664
7665#if defined(TARGET_NR_futex_time64)
7666static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7667                           int val, target_ulong timeout,
7668                           target_ulong uaddr2, int val3)
7669{
7670    struct timespec ts, *pts;
7671    int base_op;
7672
7673    /* ??? We assume FUTEX_* constants are the same on both host
7674       and target.  */
7675#ifdef FUTEX_CMD_MASK
7676    base_op = op & FUTEX_CMD_MASK;
7677#else
7678    base_op = op;
7679#endif
7680    switch (base_op) {
7681    case FUTEX_WAIT:
7682    case FUTEX_WAIT_BITSET:
7683        if (timeout) {
7684            pts = &ts;
7685            if (target_to_host_timespec64(pts, timeout)) {
7686                return -TARGET_EFAULT;
7687            }
7688        } else {
7689            pts = NULL;
7690        }
7691        return do_safe_futex(g2h(cpu, uaddr), op,
7692                             tswap32(val), pts, NULL, val3);
7693    case FUTEX_WAKE:
7694        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7695    case FUTEX_FD:
7696        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7697    case FUTEX_REQUEUE:
7698    case FUTEX_CMP_REQUEUE:
7699    case FUTEX_WAKE_OP:
7700        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7701           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7702           But the prototype takes a `struct timespec *'; insert casts
7703           to satisfy the compiler.  We do not need to tswap TIMEOUT
7704           since it's not compared to guest memory.  */
7705        pts = (struct timespec *)(uintptr_t) timeout;
7706        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7707                             (base_op == FUTEX_CMP_REQUEUE
7708                              ? tswap32(val3) : val3));
7709    default:
7710        return -TARGET_ENOSYS;
7711    }
7712}
7713#endif
7714
7715#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
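/*
 * Emulate name_to_handle_at(): read handle_bytes from the guest
 * file_handle to size the buffers, call the host syscall on a host-side
 * copy, then copy the (otherwise opaque) handle back to the guest with
 * handle_bytes and handle_type byte-swapped, and store the mount id.
 */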
7716static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7717                                     abi_long handle, abi_long mount_id,
7718                                     abi_long flags)
7719{
7720    struct file_handle *target_fh;
7721    struct file_handle *fh;
7722    int mid = 0;
7723    abi_long ret;
7724    char *name;
7725    unsigned int size, total_size;
7726
7727    if (get_user_s32(size, handle)) {
7728        return -TARGET_EFAULT;
7729    }
7730
7731    name = lock_user_string(pathname);
7732    if (!name) {
7733        return -TARGET_EFAULT;
7734    }
7735
7736    total_size = sizeof(struct file_handle) + size;
7737    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7738    if (!target_fh) {
7739        unlock_user(name, pathname, 0);
7740        return -TARGET_EFAULT;
7741    }
7742
7743    fh = g_malloc0(total_size);
7744    fh->handle_bytes = size;
7745
7746    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7747    unlock_user(name, pathname, 0);
7748
7749    /* man name_to_handle_at(2):
7750     * Other than the use of the handle_bytes field, the caller should treat
7751     * the file_handle structure as an opaque data type
7752     */
7753
7754    memcpy(target_fh, fh, total_size);
7755    target_fh->handle_bytes = tswap32(fh->handle_bytes);
7756    target_fh->handle_type = tswap32(fh->handle_type);
7757    g_free(fh);
7758    unlock_user(target_fh, handle, total_size);
7759
7760    if (put_user_s32(mid, mount_id)) {
7761        return -TARGET_EFAULT;
7762    }
7763
7764    return ret;
7765
7766}
7767#endif
7768
7769#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7770static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7771                                     abi_long flags)
7772{
7773    struct file_handle *target_fh;
7774    struct file_handle *fh;
7775    unsigned int size, total_size;
7776    abi_long ret;
7777
7778    if (get_user_s32(size, handle)) {
7779        return -TARGET_EFAULT;
7780    }
7781
7782    total_size = sizeof(struct file_handle) + size;
7783    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7784    if (!target_fh) {
7785        return -TARGET_EFAULT;
7786    }
7787
7788    fh = g_memdup(target_fh, total_size);
7789    fh->handle_bytes = size;
7790    fh->handle_type = tswap32(target_fh->handle_type);
7791
7792    ret = get_errno(open_by_handle_at(mount_fd, fh,
7793                    target_to_host_bitmask(flags, fcntl_flags_tbl)));
7794
7795    g_free(fh);
7796
7797    unlock_user(target_fh, handle, total_size);
7798
7799    return ret;
7800}
7801#endif
7802
7803#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7804
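/*
 * Common helper for signalfd()/signalfd4(): reject unsupported flags,
 * convert the guest sigset and the O_NONBLOCK/O_CLOEXEC flags to host
 * values, and register an fd translator so that siginfo structures read
 * from the new descriptor are converted back to the target layout.
 */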
7805static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7806{
7807    int host_flags;
7808    target_sigset_t *target_mask;
7809    sigset_t host_mask;
7810    abi_long ret;
7811
7812    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7813        return -TARGET_EINVAL;
7814    }
7815    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7816        return -TARGET_EFAULT;
7817    }
7818
7819    target_to_host_sigset(&host_mask, target_mask);
7820
7821    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7822
7823    ret = get_errno(signalfd(fd, &host_mask, host_flags));
7824    if (ret >= 0) {
7825        fd_trans_register(ret, &target_signalfd_trans);
7826    }
7827
7828    unlock_user_struct(target_mask, mask, 0);
7829
7830    return ret;
7831}
7832#endif
7833
7834/* Map host to target signal numbers for the wait family of syscalls.
7835   Assume all other status bits are the same.  */
7836int host_to_target_waitstatus(int status)
7837{
7838    if (WIFSIGNALED(status)) {
7839        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7840    }
7841    if (WIFSTOPPED(status)) {
7842        return (host_to_target_signal(WSTOPSIG(status)) << 8)
7843               | (status & 0xff);
7844    }
7845    return status;
7846}
7847
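/*
 * Emulate /proc/self/cmdline: write each saved argv[] string, including
 * its terminating NUL byte, into the temporary file backing the fake
 * open.
 */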
7848static int open_self_cmdline(void *cpu_env, int fd)
7849{
7850    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7851    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7852    int i;
7853
7854    for (i = 0; i < bprm->argc; i++) {
7855        size_t len = strlen(bprm->argv[i]) + 1;
7856
7857        if (write(fd, bprm->argv[i], len) != len) {
7858            return -1;
7859        }
7860    }
7861
7862    return 0;
7863}
7864
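/*
 * Emulate /proc/self/maps: walk the host's own mappings and keep only
 * the ranges that are visible in the guest address space, printing them
 * with guest addresses and the guest's page protections.  The stack
 * mapping is labelled "[stack]".  An illustrative output line:
 *
 *   00400000-0040b000 r-xp 00000000 fd:01 123456    /bin/cat
 */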
7865static int open_self_maps(void *cpu_env, int fd)
7866{
7867    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7868    TaskState *ts = cpu->opaque;
7869    GSList *map_info = read_self_maps();
7870    GSList *s;
7871    int count;
7872
7873    for (s = map_info; s; s = g_slist_next(s)) {
7874        MapInfo *e = (MapInfo *) s->data;
7875
7876        if (h2g_valid(e->start)) {
7877            unsigned long min = e->start;
7878            unsigned long max = e->end;
7879            int flags = page_get_flags(h2g(min));
7880            const char *path;
7881
7882            max = h2g_valid(max - 1) ?
7883                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7884
7885            if (page_check_range(h2g(min), max - min, flags) == -1) {
7886                continue;
7887            }
7888
7889            if (h2g(min) == ts->info->stack_limit) {
7890                path = "[stack]";
7891            } else {
7892                path = e->path;
7893            }
7894
7895            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7896                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7897                            h2g(min), h2g(max - 1) + 1,
7898                            (flags & PAGE_READ) ? 'r' : '-',
7899                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7900                            (flags & PAGE_EXEC) ? 'x' : '-',
7901                            e->is_priv ? 'p' : '-',
7902                            (uint64_t) e->offset, e->dev, e->inode);
7903            if (path) {
7904                dprintf(fd, "%*s%s\n", 73 - count, "", path);
7905            } else {
7906                dprintf(fd, "\n");
7907            }
7908        }
7909    }
7910
7911    free_self_maps(map_info);
7912
7913#ifdef TARGET_VSYSCALL_PAGE
7914    /*
7915     * We only support execution from the vsyscall page.
7916     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7917     */
7918    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7919                    " --xp 00000000 00:00 0",
7920                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7921    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7922#endif
7923
7924    return 0;
7925}
7926
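/*
 * Emulate /proc/self/stat: only the pid (field 1), the command name
 * (field 2) and the start-of-stack address (field 28) carry real values;
 * every other field is reported as 0.
 */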
7927static int open_self_stat(void *cpu_env, int fd)
7928{
7929    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7930    TaskState *ts = cpu->opaque;
7931    g_autoptr(GString) buf = g_string_new(NULL);
7932    int i;
7933
7934    for (i = 0; i < 44; i++) {
7935        if (i == 0) {
7936            /* pid */
7937            g_string_printf(buf, FMT_pid " ", getpid());
7938        } else if (i == 1) {
7939            /* app name */
7940            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7941            bin = bin ? bin + 1 : ts->bprm->argv[0];
7942            g_string_printf(buf, "(%.15s) ", bin);
7943        } else if (i == 27) {
7944            /* stack bottom */
7945            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7946        } else {
7947            /* the remaining fields are not emulated and read as 0 */
7948            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7949        }
7950
7951        if (write(fd, buf->str, buf->len) != buf->len) {
7952            return -1;
7953        }
7954    }
7955
7956    return 0;
7957}
7958
7959static int open_self_auxv(void *cpu_env, int fd)
7960{
7961    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7962    TaskState *ts = cpu->opaque;
7963    abi_ulong auxv = ts->info->saved_auxv;
7964    abi_ulong len = ts->info->auxv_len;
7965    char *ptr;
7966
7967    /*
7968     * The auxiliary vector is stored on the target process stack.
7969     * Read the whole auxv vector and copy it to the file.
7970     */
7971    ptr = lock_user(VERIFY_READ, auxv, len, 0);
7972    if (ptr != NULL) {
7973        while (len > 0) {
7974            ssize_t r;
7975            r = write(fd, ptr, len);
7976            if (r <= 0) {
7977                break;
7978            }
7979            len -= r;
7980            ptr += r;
7981        }
7982        lseek(fd, 0, SEEK_SET);
7983        unlock_user(ptr, auxv, len);
7984    }
7985
7986    return 0;
7987}
7988
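/*
 * Return non-zero if filename names <entry> under /proc/self/ or under
 * /proc/<pid>/ for our own pid, i.e. one of the proc files we emulate
 * for the current task.
 */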
7989static int is_proc_myself(const char *filename, const char *entry)
7990{
7991    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7992        filename += strlen("/proc/");
7993        if (!strncmp(filename, "self/", strlen("self/"))) {
7994            filename += strlen("self/");
7995        } else if (*filename >= '1' && *filename <= '9') {
7996            char myself[80];
7997            snprintf(myself, sizeof(myself), "%d/", getpid());
7998            if (!strncmp(filename, myself, strlen(myself))) {
7999                filename += strlen(myself);
8000            } else {
8001                return 0;
8002            }
8003        } else {
8004            return 0;
8005        }
8006        if (!strcmp(filename, entry)) {
8007            return 1;
8008        }
8009    }
8010    return 0;
8011}
8012
8013#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8014    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8015static int is_proc(const char *filename, const char *entry)
8016{
8017    return strcmp(filename, entry) == 0;
8018}
8019#endif
8020
8021#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
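/*
 * /proc/net/route presents addresses in host byte order, so when host
 * and target endianness differ we re-emit each route line with the
 * destination, gateway and mask fields byte-swapped for the guest.
 */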
8022static int open_net_route(void *cpu_env, int fd)
8023{
8024    FILE *fp;
8025    char *line = NULL;
8026    size_t len = 0;
8027    ssize_t read;
8028
8029    fp = fopen("/proc/net/route", "r");
8030    if (fp == NULL) {
8031        return -1;
8032    }
8033
8034    /* read header */
8035
8036    read = getline(&line, &len, fp);
    if (read < 0) {
        free(line);
        fclose(fp);
        return -1;
    }
8037    dprintf(fd, "%s", line);
8038
8039    /* read routes */
8040
8041    while ((read = getline(&line, &len, fp)) != -1) {
8042        char iface[16];
8043        uint32_t dest, gw, mask;
8044        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8045        int fields;
8046
8047        fields = sscanf(line,
8048                        "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8049                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8050                        &mask, &mtu, &window, &irtt);
8051        if (fields != 11) {
8052            continue;
8053        }
8054        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8055                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8056                metric, tswap32(mask), mtu, window, irtt);
8057    }
8058
8059    free(line);
8060    fclose(fp);
8061
8062    return 0;
8063}
8064#endif
8065
8066#if defined(TARGET_SPARC)
8067static int open_cpuinfo(void *cpu_env, int fd)
8068{
8069    dprintf(fd, "type\t\t: sun4u\n");
8070    return 0;
8071}
8072#endif
8073
8074#if defined(TARGET_HPPA)
8075static int open_cpuinfo(void *cpu_env, int fd)
8076{
8077    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8078    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8079    dprintf(fd, "capabilities\t: os32\n");
8080    dprintf(fd, "model\t\t: 9000/778/B160L\n");
8081    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8082    return 0;
8083}
8084#endif
8085
8086#if defined(TARGET_M68K)
8087static int open_hardware(void *cpu_env, int fd)
8088{
8089    dprintf(fd, "Model:\t\tqemu-m68k\n");
8090    return 0;
8091}
8092#endif
8093
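/*
 * Open helper used by open()/openat(): paths matching the fakes[] table
 * (emulated /proc files) are materialized into an unlinked temporary
 * file filled in by the matching fill() callback, /proc/self/exe is
 * redirected to the originally loaded executable, and everything else
 * is handed to the host via safe_openat().
 */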
8094static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8095{
8096    struct fake_open {
8097        const char *filename;
8098        int (*fill)(void *cpu_env, int fd);
8099        int (*cmp)(const char *s1, const char *s2);
8100    };
8101    const struct fake_open *fake_open;
8102    static const struct fake_open fakes[] = {
8103        { "maps", open_self_maps, is_proc_myself },
8104        { "stat", open_self_stat, is_proc_myself },
8105        { "auxv", open_self_auxv, is_proc_myself },
8106        { "cmdline", open_self_cmdline, is_proc_myself },
8107#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8108        { "/proc/net/route", open_net_route, is_proc },
8109#endif
8110#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8111        { "/proc/cpuinfo", open_cpuinfo, is_proc },
8112#endif
8113#if defined(TARGET_M68K)
8114        { "/proc/hardware", open_hardware, is_proc },
8115#endif
8116        { NULL, NULL, NULL }
8117    };
8118
8119    if (is_proc_myself(pathname, "exe")) {
8120        int execfd = qemu_getauxval(AT_EXECFD);
8121        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8122    }
8123
8124    for (fake_open = fakes; fake_open->filename; fake_open++) {
8125        if (fake_open->cmp(pathname, fake_open->filename)) {
8126            break;
8127        }
8128    }
8129
8130    if (fake_open->filename) {
8131        const char *tmpdir;
8132        char filename[PATH_MAX];
8133        int fd, r;
8134
8135        /* create temporary file to map stat to */
8136        tmpdir = getenv("TMPDIR");
8137        if (!tmpdir)
8138            tmpdir = "/tmp";
8139        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8140        fd = mkstemp(filename);
8141        if (fd < 0) {
8142            return fd;
8143        }
8144        unlink(filename);
8145
8146        if ((r = fake_open->fill(cpu_env, fd))) {
8147            int e = errno;
8148            close(fd);
8149            errno = e;
8150            return r;
8151        }
8152        lseek(fd, 0, SEEK_SET);
8153
8154        return fd;
8155    }
8156
8157    return safe_openat(dirfd, path(pathname), flags, mode);
8158}
8159
8160#define TIMER_MAGIC 0x0caf0000
8161#define TIMER_MAGIC_MASK 0xffff0000
8162
8163/* Convert QEMU provided timer ID back to internal 16bit index format */
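/* E.g. guest id 0x0caf0002 maps back to slot 2 of g_posix_timers. */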
8164static target_timer_t get_timer_id(abi_long arg)
8165{
8166    target_timer_t timerid = arg;
8167
8168    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8169        return -TARGET_EINVAL;
8170    }
8171
8172    timerid &= 0xffff;
8173
8174    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8175        return -TARGET_EINVAL;
8176    }
8177
8178    return timerid;
8179}
8180
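/*
 * Convert a guest CPU affinity mask (an array of abi_ulong in target
 * byte order) into the host's unsigned long bitmap.  The conversion is
 * done bit by bit because the word size and endianness of the two sides
 * may differ.
 */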
8181static int target_to_host_cpu_mask(unsigned long *host_mask,
8182                                   size_t host_size,
8183                                   abi_ulong target_addr,
8184                                   size_t target_size)
8185{
8186    unsigned target_bits = sizeof(abi_ulong) * 8;
8187    unsigned host_bits = sizeof(*host_mask) * 8;
8188    abi_ulong *target_mask;
8189    unsigned i, j;
8190
8191    assert(host_size >= target_size);
8192
8193    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8194    if (!target_mask) {
8195        return -TARGET_EFAULT;
8196    }
8197    memset(host_mask, 0, host_size);
8198
8199    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8200        unsigned bit = i * target_bits;
8201        abi_ulong val;
8202
8203        __get_user(val, &target_mask[i]);
8204        for (j = 0; j < target_bits; j++, bit++) {
8205            if (val & (1UL << j)) {
8206                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8207            }
8208        }
8209    }
8210
8211    unlock_user(target_mask, target_addr, 0);
8212    return 0;
8213}
8214
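/*
 * The reverse of target_to_host_cpu_mask(): copy a host CPU bitmap back
 * into the guest's abi_ulong representation.
 */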
8215static int host_to_target_cpu_mask(const unsigned long *host_mask,
8216                                   size_t host_size,
8217                                   abi_ulong target_addr,
8218                                   size_t target_size)
8219{
8220    unsigned target_bits = sizeof(abi_ulong) * 8;
8221    unsigned host_bits = sizeof(*host_mask) * 8;
8222    abi_ulong *target_mask;
8223    unsigned i, j;
8224
8225    assert(host_size >= target_size);
8226
8227    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8228    if (!target_mask) {
8229        return -TARGET_EFAULT;
8230    }
8231
8232    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8233        unsigned bit = i * target_bits;
8234        abi_ulong val = 0;
8235
8236        for (j = 0; j < target_bits; j++, bit++) {
8237            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8238                val |= 1UL << j;
8239            }
8240        }
8241        __put_user(val, &target_mask[i]);
8242    }
8243
8244    unlock_user(target_mask, target_addr, target_size);
8245    return 0;
8246}
8247
8248/* This is an internal helper for do_syscall so that it is easier
8249 * to have a single return point, so that actions, such as logging
8250 * of syscall results, can be performed.
8251 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8252 */
8253static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8254                            abi_long arg2, abi_long arg3, abi_long arg4,
8255                            abi_long arg5, abi_long arg6, abi_long arg7,
8256                            abi_long arg8)
8257{
8258    CPUState *cpu = env_cpu(cpu_env);
8259    abi_long ret;
8260#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8261    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8262    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8263    || defined(TARGET_NR_statx)
8264    struct stat st;
8265#endif
8266#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8267    || defined(TARGET_NR_fstatfs)
8268    struct statfs stfs;
8269#endif
8270    void *p;
8271
8272    switch(num) {
8273    case TARGET_NR_exit:
8274        /* In old applications this may be used to implement _exit(2).
8275           However, in threaded applications it is used for thread termination,
8276           and _exit_group is used for application termination.
8277           Do thread termination if we have more than one thread.  */
8278
8279        if (block_signals()) {
8280            return -TARGET_ERESTARTSYS;
8281        }
8282
8283        pthread_mutex_lock(&clone_lock);
8284
8285        if (CPU_NEXT(first_cpu)) {
8286            TaskState *ts = cpu->opaque;
8287
8288            object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8289            object_unref(OBJECT(cpu));
8290            /*
8291             * At this point the CPU should be unrealized and removed
8292             * from cpu lists. We can clean-up the rest of the thread
8293             * data without the lock held.
8294             */
8295
8296            pthread_mutex_unlock(&clone_lock);
8297
8298            if (ts->child_tidptr) {
8299                put_user_u32(0, ts->child_tidptr);
8300                do_sys_futex(g2h(cpu, ts->child_tidptr),
8301                             FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8302            }
8303            thread_cpu = NULL;
8304            g_free(ts);
8305            rcu_unregister_thread();
8306            pthread_exit(NULL);
8307        }
8308
8309        pthread_mutex_unlock(&clone_lock);
8310        preexit_cleanup(cpu_env, arg1);
8311        _exit(arg1);
8312        return 0; /* avoid warning */
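    /*
     * read/write pass their buffers through the per-fd translation hooks
     * (fd_trans_host_to_target_data / fd_trans_target_to_host_data) so
     * that data read from or written to emulated descriptors (such as a
     * signalfd set up by do_signalfd4() above) is converted between host
     * and target layouts.
     */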
8313    case TARGET_NR_read:
8314        if (arg2 == 0 && arg3 == 0) {
8315            return get_errno(safe_read(arg1, 0, 0));
8316        } else {
8317            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8318                return -TARGET_EFAULT;
8319            ret = get_errno(safe_read(arg1, p, arg3));
8320            if (ret >= 0 &&
8321                fd_trans_host_to_target_data(arg1)) {
8322                ret = fd_trans_host_to_target_data(arg1)(p, ret);
8323            }
8324            unlock_user(p, arg2, ret);
8325        }
8326        return ret;
8327    case TARGET_NR_write:
8328        if (arg2 == 0 && arg3 == 0) {
8329            return get_errno(safe_write(arg1, 0, 0));
8330        }
8331        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8332            return -TARGET_EFAULT;
8333        if (fd_trans_target_to_host_data(arg1)) {
8334            void *copy = g_malloc(arg3);
8335            memcpy(copy, p, arg3);
8336            ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8337            if (ret >= 0) {
8338                ret = get_errno(safe_write(arg1, copy, ret));
8339            }
8340            g_free(copy);
8341        } else {
8342            ret = get_errno(safe_write(arg1, p, arg3));
8343        }
8344        unlock_user(p, arg2, 0);
8345        return ret;
8346
8347#ifdef TARGET_NR_open
8348    case TARGET_NR_open:
8349        if (!(p = lock_user_string(arg1)))
8350            return -TARGET_EFAULT;
8351        ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8352                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
8353                                  arg3));
8354        fd_trans_unregister(ret);
8355        unlock_user(p, arg1, 0);
8356        return ret;
8357#endif
8358    case TARGET_NR_openat:
8359        if (!(p = lock_user_string(arg2)))
8360            return -TARGET_EFAULT;
8361        ret = get_errno(do_openat(cpu_env, arg1, p,
8362                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
8363                                  arg4));
8364        fd_trans_unregister(ret);
8365        unlock_user(p, arg2, 0);
8366        return ret;
8367#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8368    case TARGET_NR_name_to_handle_at:
8369        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8370        return ret;
8371#endif
8372#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8373    case TARGET_NR_open_by_handle_at:
8374        ret = do_open_by_handle_at(arg1, arg2, arg3);
8375        fd_trans_unregister(ret);
8376        return ret;
8377#endif
8378    case TARGET_NR_close:
8379        fd_trans_unregister(arg1);
8380        return get_errno(close(arg1));
8381
8382    case TARGET_NR_brk:
8383        return do_brk(arg1);
8384#ifdef TARGET_NR_fork
8385    case TARGET_NR_fork:
8386        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8387#endif
8388#ifdef TARGET_NR_waitpid
8389    case TARGET_NR_waitpid:
8390        {
8391            int status;
8392            ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8393            if (!is_error(ret) && arg2 && ret
8394                && put_user_s32(host_to_target_waitstatus(status), arg2))
8395                return -TARGET_EFAULT;
8396        }
8397        return ret;
8398#endif
8399#ifdef TARGET_NR_waitid
8400    case TARGET_NR_waitid:
8401        {
8402            siginfo_t info;
8403            info.si_pid = 0;
8404            ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8405            if (!is_error(ret) && arg3 && info.si_pid != 0) {
8406                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8407                    return -TARGET_EFAULT;
8408                host_to_target_siginfo(p, &info);
8409                unlock_user(p, arg3, sizeof(target_siginfo_t));
8410            }
8411        }
8412        return ret;
8413#endif
8414#ifdef TARGET_NR_creat /* not on alpha */
8415    case TARGET_NR_creat:
8416        if (!(p = lock_user_string(arg1)))
8417            return -TARGET_EFAULT;
8418        ret = get_errno(creat(p, arg2));
8419        fd_trans_unregister(ret);
8420        unlock_user(p, arg1, 0);
8421        return ret;
8422#endif
8423#ifdef TARGET_NR_link
8424    case TARGET_NR_link:
8425        {
8426            void * p2;
8427            p = lock_user_string(arg1);
8428            p2 = lock_user_string(arg2);
8429            if (!p || !p2)
8430                ret = -TARGET_EFAULT;
8431            else
8432                ret = get_errno(link(p, p2));
8433            unlock_user(p2, arg2, 0);
8434            unlock_user(p, arg1, 0);
8435        }
8436        return ret;
8437#endif
8438#if defined(TARGET_NR_linkat)
8439    case TARGET_NR_linkat:
8440        {
8441            void * p2 = NULL;
8442            if (!arg2 || !arg4)
8443                return -TARGET_EFAULT;
8444            p  = lock_user_string(arg2);
8445            p2 = lock_user_string(arg4);
8446            if (!p || !p2)
8447                ret = -TARGET_EFAULT;
8448            else
8449                ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8450            unlock_user(p, arg2, 0);
8451            unlock_user(p2, arg4, 0);
8452        }
8453        return ret;
8454#endif
8455#ifdef TARGET_NR_unlink
8456    case TARGET_NR_unlink:
8457        if (!(p = lock_user_string(arg1)))
8458            return -TARGET_EFAULT;
8459        ret = get_errno(unlink(p));
8460        unlock_user(p, arg1, 0);
8461        return ret;
8462#endif
8463#if defined(TARGET_NR_unlinkat)
8464    case TARGET_NR_unlinkat:
8465        if (!(p = lock_user_string(arg2)))
8466            return -TARGET_EFAULT;
8467        ret = get_errno(unlinkat(arg1, p, arg3));
8468        unlock_user(p, arg2, 0);
8469        return ret;
8470#endif
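    /*
     * execve: the guest argv/envp are arrays of guest pointers.  Count
     * the entries, lock every string into host memory, and pass the
     * resulting host arrays to safe_execve(); the comment below explains
     * why the safe_syscall wrapper is needed here.
     */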
8471    case TARGET_NR_execve:
8472        {
8473            char **argp, **envp;
8474            int argc, envc;
8475            abi_ulong gp;
8476            abi_ulong guest_argp;
8477            abi_ulong guest_envp;
8478            abi_ulong addr;
8479            char **q;
8480            int total_size = 0;
8481
8482            argc = 0;
8483            guest_argp = arg2;
8484            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8485                if (get_user_ual(addr, gp))
8486                    return -TARGET_EFAULT;
8487                if (!addr)
8488                    break;
8489                argc++;
8490            }
8491            envc = 0;
8492            guest_envp = arg3;
8493            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8494                if (get_user_ual(addr, gp))
8495                    return -TARGET_EFAULT;
8496                if (!addr)
8497                    break;
8498                envc++;
8499            }
8500
8501            argp = g_new0(char *, argc + 1);
8502            envp = g_new0(char *, envc + 1);
8503
8504            for (gp = guest_argp, q = argp; gp;
8505                  gp += sizeof(abi_ulong), q++) {
8506                if (get_user_ual(addr, gp))
8507                    goto execve_efault;
8508                if (!addr)
8509                    break;
8510                if (!(*q = lock_user_string(addr)))
8511                    goto execve_efault;
8512                total_size += strlen(*q) + 1;
8513            }
8514            *q = NULL;
8515
8516            for (gp = guest_envp, q = envp; gp;
8517                  gp += sizeof(abi_ulong), q++) {
8518                if (get_user_ual(addr, gp))
8519                    goto execve_efault;
8520                if (!addr)
8521                    break;
8522                if (!(*q = lock_user_string(addr)))
8523                    goto execve_efault;
8524                total_size += strlen(*q) + 1;
8525            }
8526            *q = NULL;
8527
8528            if (!(p = lock_user_string(arg1)))
8529                goto execve_efault;
8530            /* Although execve() is not an interruptible syscall it is
8531             * a special case where we must use the safe_syscall wrapper:
8532             * if we allow a signal to happen before we make the host
8533             * syscall then we will 'lose' it, because at the point of
8534             * execve the process leaves QEMU's control. So we use the
8535             * safe syscall wrapper to ensure that we either take the
8536             * signal as a guest signal, or else it does not happen
8537             * before the execve completes and makes it the other
8538             * program's problem.
8539             */
8540            ret = get_errno(safe_execve(p, argp, envp));
8541            unlock_user(p, arg1, 0);
8542
8543            goto execve_end;
8544
8545        execve_efault:
8546            ret = -TARGET_EFAULT;
8547
8548        execve_end:
8549            for (gp = guest_argp, q = argp; *q;
8550                  gp += sizeof(abi_ulong), q++) {
8551                if (get_user_ual(addr, gp)
8552                    || !addr)
8553                    break;
8554                unlock_user(*q, addr, 0);
8555            }
8556            for (gp = guest_envp, q = envp; *q;
8557                  gp += sizeof(abi_ulong), q++) {
8558                if (get_user_ual(addr, gp)
8559                    || !addr)
8560                    break;
8561                unlock_user(*q, addr, 0);
8562            }
8563
8564            g_free(argp);
8565            g_free(envp);
8566        }
8567        return ret;
8568    case TARGET_NR_chdir:
8569        if (!(p = lock_user_string(arg1)))
8570            return -TARGET_EFAULT;
8571        ret = get_errno(chdir(p));
8572        unlock_user(p, arg1, 0);
8573        return ret;
8574#ifdef TARGET_NR_time
8575    case TARGET_NR_time:
8576        {
8577            time_t host_time;
8578            ret = get_errno(time(&host_time));
8579            if (!is_error(ret)
8580                && arg1
8581                && put_user_sal(host_time, arg1))
8582                return -TARGET_EFAULT;
8583        }
8584        return ret;
8585#endif
8586#ifdef TARGET_NR_mknod
8587    case TARGET_NR_mknod:
8588        if (!(p = lock_user_string(arg1)))
8589            return -TARGET_EFAULT;
8590        ret = get_errno(mknod(p, arg2, arg3));
8591        unlock_user(p, arg1, 0);
8592        return ret;
8593#endif
8594#if defined(TARGET_NR_mknodat)
8595    case TARGET_NR_mknodat:
8596        if (!(p = lock_user_string(arg2)))
8597            return -TARGET_EFAULT;
8598        ret = get_errno(mknodat(arg1, p, arg3, arg4));
8599        unlock_user(p, arg2, 0);
8600        return ret;
8601#endif
8602#ifdef TARGET_NR_chmod
8603    case TARGET_NR_chmod:
8604        if (!(p = lock_user_string(arg1)))
8605            return -TARGET_EFAULT;
8606        ret = get_errno(chmod(p, arg2));
8607        unlock_user(p, arg1, 0);
8608        return ret;
8609#endif
8610#ifdef TARGET_NR_lseek
8611    case TARGET_NR_lseek:
8612        return get_errno(lseek(arg1, arg2, arg3));
8613#endif
8614#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8615    /* Alpha specific */
8616    case TARGET_NR_getxpid:
8617        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8618        return get_errno(getpid());
8619#endif
8620#ifdef TARGET_NR_getpid
8621    case TARGET_NR_getpid:
8622        return get_errno(getpid());
8623#endif
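    /*
     * mount: the source (arg1), filesystem type (arg3) and data (arg5)
     * arguments are all optional.  The data blob has no defined format,
     * so it is passed through untranslated via g2h() (see the FIXME
     * below about locking it).
     */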
8624    case TARGET_NR_mount:
8625        {
8626            /* need to look at the data field */
8627            void *p2, *p3;
8628
8629            if (arg1) {
8630                p = lock_user_string(arg1);
8631                if (!p) {
8632                    return -TARGET_EFAULT;
8633                }
8634            } else {
8635                p = NULL;
8636            }
8637
8638            p2 = lock_user_string(arg2);
8639            if (!p2) {
8640                if (arg1) {
8641                    unlock_user(p, arg1, 0);
8642                }
8643                return -TARGET_EFAULT;
8644            }
8645
8646            if (arg3) {
8647                p3 = lock_user_string(arg3);
8648                if (!p3) {
8649                    if (arg1) {
8650                        unlock_user(p, arg1, 0);
8651                    }
8652                    unlock_user(p2, arg2, 0);
8653                    return -TARGET_EFAULT;
8654                }
8655            } else {
8656                p3 = NULL;
8657            }
8658
8659            /* FIXME - arg5 should be locked, but it isn't clear how to
8660             * do that since it's not guaranteed to be a NULL-terminated
8661             * string.
8662             */
8663            if (!arg5) {
8664                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8665            } else {
8666                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8667            }
8668            ret = get_errno(ret);
8669
8670            if (arg1) {
8671                unlock_user(p, arg1, 0);
8672            }
8673            unlock_user(p2, arg2, 0);
8674            if (arg3) {
8675                unlock_user(p3, arg3, 0);
8676            }
8677        }
8678        return ret;
8679#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8680#if defined(TARGET_NR_umount)
8681    case TARGET_NR_umount:
8682#endif
8683#if defined(TARGET_NR_oldumount)
8684    case TARGET_NR_oldumount:
8685#endif
8686        if (!(p = lock_user_string(arg1)))
8687            return -TARGET_EFAULT;
8688        ret = get_errno(umount(p));
8689        unlock_user(p, arg1, 0);
8690        return ret;
8691#endif
8692#ifdef TARGET_NR_stime /* not on alpha */
8693    case TARGET_NR_stime:
8694        {
8695            struct timespec ts;
8696            ts.tv_nsec = 0;
8697            if (get_user_sal(ts.tv_sec, arg1)) {
8698                return -TARGET_EFAULT;
8699            }
8700            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8701        }
8702#endif
8703#ifdef TARGET_NR_alarm /* not on alpha */
8704    case TARGET_NR_alarm:
8705        return alarm(arg1);
8706#endif
8707#ifdef TARGET_NR_pause /* not on alpha */
8708    case TARGET_NR_pause:
8709        if (!block_signals()) {
8710            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8711        }
8712        return -TARGET_EINTR;
8713#endif
8714#ifdef TARGET_NR_utime
8715    case TARGET_NR_utime:
8716        {
8717            struct utimbuf tbuf, *host_tbuf;
8718            struct target_utimbuf *target_tbuf;
8719            if (arg2) {
8720                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8721                    return -TARGET_EFAULT;
8722                tbuf.actime = tswapal(target_tbuf->actime);
8723                tbuf.modtime = tswapal(target_tbuf->modtime);
8724                unlock_user_struct(target_tbuf, arg2, 0);
8725                host_tbuf = &tbuf;
8726            } else {
8727                host_tbuf = NULL;
8728            }
8729            if (!(p = lock_user_string(arg1)))
8730                return -TARGET_EFAULT;
8731            ret = get_errno(utime(p, host_tbuf));
8732            unlock_user(p, arg1, 0);
8733        }
8734        return ret;
8735#endif
8736#ifdef TARGET_NR_utimes
8737    case TARGET_NR_utimes:
8738        {
8739            struct timeval *tvp, tv[2];
8740            if (arg2) {
8741                if (copy_from_user_timeval(&tv[0], arg2)
8742                    || copy_from_user_timeval(&tv[1],
8743                                              arg2 + sizeof(struct target_timeval)))
8744                    return -TARGET_EFAULT;
8745                tvp = tv;
8746            } else {
8747                tvp = NULL;
8748            }
8749            if (!(p = lock_user_string(arg1)))
8750                return -TARGET_EFAULT;
8751            ret = get_errno(utimes(p, tvp));
8752            unlock_user(p, arg1, 0);
8753        }
8754        return ret;
8755#endif
8756#if defined(TARGET_NR_futimesat)
8757    case TARGET_NR_futimesat:
8758        {
8759            struct timeval *tvp, tv[2];
8760            if (arg3) {
8761                if (copy_from_user_timeval(&tv[0], arg3)
8762                    || copy_from_user_timeval(&tv[1],
8763                                              arg3 + sizeof(struct target_timeval)))
8764                    return -TARGET_EFAULT;
8765                tvp = tv;
8766            } else {
8767                tvp = NULL;
8768            }
8769            if (!(p = lock_user_string(arg2))) {
8770                return -TARGET_EFAULT;
8771            }
8772            ret = get_errno(futimesat(arg1, path(p), tvp));
8773            unlock_user(p, arg2, 0);
8774        }
8775        return ret;
8776#endif
8777#ifdef TARGET_NR_access
8778    case TARGET_NR_access:
8779        if (!(p = lock_user_string(arg1))) {
8780            return -TARGET_EFAULT;
8781        }
8782        ret = get_errno(access(path(p), arg2));
8783        unlock_user(p, arg1, 0);
8784        return ret;
8785#endif
8786#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8787    case TARGET_NR_faccessat:
8788        if (!(p = lock_user_string(arg2))) {
8789            return -TARGET_EFAULT;
8790        }
8791        ret = get_errno(faccessat(arg1, p, arg3, 0));
8792        unlock_user(p, arg2, 0);
8793        return ret;
8794#endif
8795#ifdef TARGET_NR_nice /* not on alpha */
8796    case TARGET_NR_nice:
8797        return get_errno(nice(arg1));
8798#endif
8799    case TARGET_NR_sync:
8800        sync();
8801        return 0;
8802#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8803    case TARGET_NR_syncfs:
8804        return get_errno(syncfs(arg1));
8805#endif
8806    case TARGET_NR_kill:
8807        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8808#ifdef TARGET_NR_rename
8809    case TARGET_NR_rename:
8810        {
8811            void *p2;
8812            p = lock_user_string(arg1);
8813            p2 = lock_user_string(arg2);
8814            if (!p || !p2)
8815                ret = -TARGET_EFAULT;
8816            else
8817                ret = get_errno(rename(p, p2));
8818            unlock_user(p2, arg2, 0);
8819            unlock_user(p, arg1, 0);
8820        }
8821        return ret;
8822#endif
8823#if defined(TARGET_NR_renameat)
8824    case TARGET_NR_renameat:
8825        {
8826            void *p2;
8827            p  = lock_user_string(arg2);
8828            p2 = lock_user_string(arg4);
8829            if (!p || !p2)
8830                ret = -TARGET_EFAULT;
8831            else
8832                ret = get_errno(renameat(arg1, p, arg3, p2));
8833            unlock_user(p2, arg4, 0);
8834            unlock_user(p, arg2, 0);
8835        }
8836        return ret;
8837#endif
8838#if defined(TARGET_NR_renameat2)
8839    case TARGET_NR_renameat2:
8840        {
8841            void *p2;
8842            p  = lock_user_string(arg2);
8843            p2 = lock_user_string(arg4);
8844            if (!p || !p2) {
8845                ret = -TARGET_EFAULT;
8846            } else {
8847                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8848            }
8849            unlock_user(p2, arg4, 0);
8850            unlock_user(p, arg2, 0);
8851        }
8852        return ret;
8853#endif
8854#ifdef TARGET_NR_mkdir
8855    case TARGET_NR_mkdir:
8856        if (!(p = lock_user_string(arg1)))
8857            return -TARGET_EFAULT;
8858        ret = get_errno(mkdir(p, arg2));
8859        unlock_user(p, arg1, 0);
8860        return ret;
8861#endif
8862#if defined(TARGET_NR_mkdirat)
8863    case TARGET_NR_mkdirat:
8864        if (!(p = lock_user_string(arg2)))
8865            return -TARGET_EFAULT;
8866        ret = get_errno(mkdirat(arg1, p, arg3));
8867        unlock_user(p, arg2, 0);
8868        return ret;
8869#endif
8870#ifdef TARGET_NR_rmdir
8871    case TARGET_NR_rmdir:
8872        if (!(p = lock_user_string(arg1)))
8873            return -TARGET_EFAULT;
8874        ret = get_errno(rmdir(p));
8875        unlock_user(p, arg1, 0);
8876        return ret;
8877#endif
8878    case TARGET_NR_dup:
8879        ret = get_errno(dup(arg1));
8880        if (ret >= 0) {
8881            fd_trans_dup(arg1, ret);
8882        }
8883        return ret;
8884#ifdef TARGET_NR_pipe
8885    case TARGET_NR_pipe:
8886        return do_pipe(cpu_env, arg1, 0, 0);
8887#endif
8888#ifdef TARGET_NR_pipe2
8889    case TARGET_NR_pipe2:
8890        return do_pipe(cpu_env, arg1,
8891                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8892#endif
8893    case TARGET_NR_times:
8894        {
8895            struct target_tms *tmsp;
8896            struct tms tms;
8897            ret = get_errno(times(&tms));
8898            if (arg1) {
8899                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8900                if (!tmsp)
8901                    return -TARGET_EFAULT;
8902                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8903                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8904                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8905                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8906            }
8907            if (!is_error(ret))
8908                ret = host_to_target_clock_t(ret);
8909        }
8910        return ret;
8911    case TARGET_NR_acct:
8912        if (arg1 == 0) {
8913            ret = get_errno(acct(NULL));
8914        } else {
8915            if (!(p = lock_user_string(arg1))) {
8916                return -TARGET_EFAULT;
8917            }
8918            ret = get_errno(acct(path(p)));
8919            unlock_user(p, arg1, 0);
8920        }
8921        return ret;
8922#ifdef TARGET_NR_umount2
8923    case TARGET_NR_umount2:
8924        if (!(p = lock_user_string(arg1)))
8925            return -TARGET_EFAULT;
8926        ret = get_errno(umount2(p, arg2));
8927        unlock_user(p, arg1, 0);
8928        return ret;
8929#endif
8930    case TARGET_NR_ioctl:
8931        return do_ioctl(arg1, arg2, arg3);
8932#ifdef TARGET_NR_fcntl
8933    case TARGET_NR_fcntl:
8934        return do_fcntl(arg1, arg2, arg3);
8935#endif
8936    case TARGET_NR_setpgid:
8937        return get_errno(setpgid(arg1, arg2));
8938    case TARGET_NR_umask:
8939        return get_errno(umask(arg1));
8940    case TARGET_NR_chroot:
8941        if (!(p = lock_user_string(arg1)))
8942            return -TARGET_EFAULT;
8943        ret = get_errno(chroot(p));
8944        unlock_user(p, arg1, 0);
8945        return ret;
8946#ifdef TARGET_NR_dup2
8947    case TARGET_NR_dup2:
8948        ret = get_errno(dup2(arg1, arg2));
8949        if (ret >= 0) {
8950            fd_trans_dup(arg1, arg2);
8951        }
8952        return ret;
8953#endif
8954#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8955    case TARGET_NR_dup3:
8956    {
8957        int host_flags;
8958
8959        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8960            return -TARGET_EINVAL;
8961        }
8962        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8963        ret = get_errno(dup3(arg1, arg2, host_flags));
8964        if (ret >= 0) {
8965            fd_trans_dup(arg1, arg2);
8966        }
8967        return ret;
8968    }
8969#endif
8970#ifdef TARGET_NR_getppid /* not on alpha */
8971    case TARGET_NR_getppid:
8972        return get_errno(getppid());
8973#endif
8974#ifdef TARGET_NR_getpgrp
8975    case TARGET_NR_getpgrp:
8976        return get_errno(getpgrp());
8977#endif
8978    case TARGET_NR_setsid:
8979        return get_errno(setsid());
8980#ifdef TARGET_NR_sigaction
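    /*
     * Old-style sigaction: the guest's struct layout differs per target
     * (Alpha and MIPS have their own variants), so each branch converts
     * to the generic target_sigaction before calling do_sigaction() and
     * converts the old-style action back afterwards.
     */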
8981    case TARGET_NR_sigaction:
8982        {
8983#if defined(TARGET_ALPHA)
8984            struct target_sigaction act, oact, *pact = 0;
8985            struct target_old_sigaction *old_act;
8986            if (arg2) {
8987                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8988                    return -TARGET_EFAULT;
8989                act._sa_handler = old_act->_sa_handler;
8990                target_siginitset(&act.sa_mask, old_act->sa_mask);
8991                act.sa_flags = old_act->sa_flags;
8992                act.sa_restorer = 0;
8993                unlock_user_struct(old_act, arg2, 0);
8994                pact = &act;
8995            }
8996            ret = get_errno(do_sigaction(arg1, pact, &oact));
8997            if (!is_error(ret) && arg3) {
8998                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8999                    return -TARGET_EFAULT;
9000                old_act->_sa_handler = oact._sa_handler;
9001                old_act->sa_mask = oact.sa_mask.sig[0];
9002                old_act->sa_flags = oact.sa_flags;
9003                unlock_user_struct(old_act, arg3, 1);
9004            }
9005#elif defined(TARGET_MIPS)
9006            struct target_sigaction act, oact, *pact, *old_act;
9007
9008            if (arg2) {
9009                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9010                    return -TARGET_EFAULT;
9011                act._sa_handler = old_act->_sa_handler;
9012                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9013                act.sa_flags = old_act->sa_flags;
9014                unlock_user_struct(old_act, arg2, 0);
9015                pact = &act;
9016            } else {
9017                pact = NULL;
9018            }
9019
9020            ret = get_errno(do_sigaction(arg1, pact, &oact));
9021
9022            if (!is_error(ret) && arg3) {
9023                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9024                    return -TARGET_EFAULT;
9025                old_act->_sa_handler = oact._sa_handler;
9026                old_act->sa_flags = oact.sa_flags;
9027                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9028                old_act->sa_mask.sig[1] = 0;
9029                old_act->sa_mask.sig[2] = 0;
9030                old_act->sa_mask.sig[3] = 0;
9031                unlock_user_struct(old_act, arg3, 1);
9032            }
9033#else
9034            struct target_old_sigaction *old_act;
9035            struct target_sigaction act, oact, *pact;
9036            if (arg2) {
9037                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9038                    return -TARGET_EFAULT;
9039                act._sa_handler = old_act->_sa_handler;
9040                target_siginitset(&act.sa_mask, old_act->sa_mask);
9041                act.sa_flags = old_act->sa_flags;
9042                act.sa_restorer = old_act->sa_restorer;
9043#ifdef TARGET_ARCH_HAS_KA_RESTORER
9044                act.ka_restorer = 0;
9045#endif
9046                unlock_user_struct(old_act, arg2, 0);
9047                pact = &act;
9048            } else {
9049                pact = NULL;
9050            }
9051            ret = get_errno(do_sigaction(arg1, pact, &oact));
9052            if (!is_error(ret) && arg3) {
9053                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9054                    return -TARGET_EFAULT;
9055                old_act->_sa_handler = oact._sa_handler;
9056                old_act->sa_mask = oact.sa_mask.sig[0];
9057                old_act->sa_flags = oact.sa_flags;
9058                old_act->sa_restorer = oact.sa_restorer;
9059                unlock_user_struct(old_act, arg3, 1);
9060            }
9061#endif
9062        }
9063        return ret;
9064#endif
9065    case TARGET_NR_rt_sigaction:
9066        {
9067#if defined(TARGET_ALPHA)
9068            /* For Alpha and SPARC this is a 5 argument syscall, with
9069             * a 'restorer' parameter which must be copied into the
9070             * sa_restorer field of the sigaction struct.
9071             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9072             * and arg5 is the sigsetsize.
9073             * Alpha also has a separate rt_sigaction struct that it uses
9074             * here; SPARC uses the usual sigaction struct.
9075             */
9076            struct target_rt_sigaction *rt_act;
9077            struct target_sigaction act, oact, *pact = 0;
9078
9079            if (arg4 != sizeof(target_sigset_t)) {
9080                return -TARGET_EINVAL;
9081            }
9082            if (arg2) {
9083                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
9084                    return -TARGET_EFAULT;
9085                act._sa_handler = rt_act->_sa_handler;
9086                act.sa_mask = rt_act->sa_mask;
9087                act.sa_flags = rt_act->sa_flags;
9088                act.sa_restorer = arg5;
9089                unlock_user_struct(rt_act, arg2, 0);
9090                pact = &act;
9091            }
9092            ret = get_errno(do_sigaction(arg1, pact, &oact));
9093            if (!is_error(ret) && arg3) {
9094                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9095                    return -TARGET_EFAULT;
9096                rt_act->_sa_handler = oact._sa_handler;
9097                rt_act->sa_mask = oact.sa_mask;
9098                rt_act->sa_flags = oact.sa_flags;
9099                unlock_user_struct(rt_act, arg3, 1);
9100            }
9101#else
9102#ifdef TARGET_SPARC
9103            target_ulong restorer = arg4;
9104            target_ulong sigsetsize = arg5;
9105#else
9106            target_ulong sigsetsize = arg4;
9107#endif
9108            struct target_sigaction *act;
9109            struct target_sigaction *oact;
9110
9111            if (sigsetsize != sizeof(target_sigset_t)) {
9112                return -TARGET_EINVAL;
9113            }
9114            if (arg2) {
9115                if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9116                    return -TARGET_EFAULT;
9117                }
9118#ifdef TARGET_ARCH_HAS_KA_RESTORER
9119                act->ka_restorer = restorer;
9120#endif
9121            } else {
9122                act = NULL;
9123            }
9124            if (arg3) {
9125                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9126                    ret = -TARGET_EFAULT;
9127                    goto rt_sigaction_fail;
9128                }
9129            } else
9130                oact = NULL;
9131            ret = get_errno(do_sigaction(arg1, act, oact));
9132        rt_sigaction_fail:
9133            if (act)
9134                unlock_user_struct(act, arg2, 0);
9135            if (oact)
9136                unlock_user_struct(oact, arg3, 1);
9137#endif
9138        }
9139        return ret;
9140#ifdef TARGET_NR_sgetmask /* not on alpha */
9141    case TARGET_NR_sgetmask:
9142        {
9143            sigset_t cur_set;
9144            abi_ulong target_set;
9145            ret = do_sigprocmask(0, NULL, &cur_set);
9146            if (!ret) {
9147                host_to_target_old_sigset(&target_set, &cur_set);
9148                ret = target_set;
9149            }
9150        }
9151        return ret;
9152#endif
9153#ifdef TARGET_NR_ssetmask /* not on alpha */
9154    case TARGET_NR_ssetmask:
9155        {
9156            sigset_t set, oset;
9157            abi_ulong target_set = arg1;
9158            target_to_host_old_sigset(&set, &target_set);
9159            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9160            if (!ret) {
9161                host_to_target_old_sigset(&target_set, &oset);
9162                ret = target_set;
9163            }
9164        }
9165        return ret;
9166#endif
9167#ifdef TARGET_NR_sigprocmask
9168    case TARGET_NR_sigprocmask:
9169        {
9170#if defined(TARGET_ALPHA)
9171            sigset_t set, oldset;
9172            abi_ulong mask;
9173            int how;
9174
9175            switch (arg1) {
9176            case TARGET_SIG_BLOCK:
9177                how = SIG_BLOCK;
9178                break;
9179            case TARGET_SIG_UNBLOCK:
9180                how = SIG_UNBLOCK;
9181                break;
9182            case TARGET_SIG_SETMASK:
9183                how = SIG_SETMASK;
9184                break;
9185            default:
9186                return -TARGET_EINVAL;
9187            }
9188            mask = arg2;
9189            target_to_host_old_sigset(&set, &mask);
9190
9191            ret = do_sigprocmask(how, &set, &oldset);
9192            if (!is_error(ret)) {
9193                host_to_target_old_sigset(&mask, &oldset);
9194                ret = mask;
9195                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9196            }
9197#else
9198            sigset_t set, oldset, *set_ptr;
9199            int how;
9200
9201            if (arg2) {
9202                switch (arg1) {
9203                case TARGET_SIG_BLOCK:
9204                    how = SIG_BLOCK;
9205                    break;
9206                case TARGET_SIG_UNBLOCK:
9207                    how = SIG_UNBLOCK;
9208                    break;
9209                case TARGET_SIG_SETMASK:
9210                    how = SIG_SETMASK;
9211                    break;
9212                default:
9213                    return -TARGET_EINVAL;
9214                }
9215                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9216                    return -TARGET_EFAULT;
9217                target_to_host_old_sigset(&set, p);
9218                unlock_user(p, arg2, 0);
9219                set_ptr = &set;
9220            } else {
9221                how = 0;
9222                set_ptr = NULL;
9223            }
9224            ret = do_sigprocmask(how, set_ptr, &oldset);
9225            if (!is_error(ret) && arg3) {
9226                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9227                    return -TARGET_EFAULT;
9228                host_to_target_old_sigset(p, &oldset);
9229                unlock_user(p, arg3, sizeof(target_sigset_t));
9230            }
9231#endif
9232        }
9233        return ret;
9234#endif
9235    case TARGET_NR_rt_sigprocmask:
9236        {
9237            int how = arg1;
9238            sigset_t set, oldset, *set_ptr;
9239
9240            if (arg4 != sizeof(target_sigset_t)) {
9241                return -TARGET_EINVAL;
9242            }
9243
9244            if (arg2) {
9245                switch(how) {
9246                case TARGET_SIG_BLOCK:
9247                    how = SIG_BLOCK;
9248                    break;
9249                case TARGET_SIG_UNBLOCK:
9250                    how = SIG_UNBLOCK;
9251                    break;
9252                case TARGET_SIG_SETMASK:
9253                    how = SIG_SETMASK;
9254                    break;
9255                default:
9256                    return -TARGET_EINVAL;
9257                }
9258                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9259                    return -TARGET_EFAULT;
9260                target_to_host_sigset(&set, p);
9261                unlock_user(p, arg2, 0);
9262                set_ptr = &set;
9263            } else {
9264                how = 0;
9265                set_ptr = NULL;
9266            }
9267            ret = do_sigprocmask(how, set_ptr, &oldset);
9268            if (!is_error(ret) && arg3) {
9269                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9270                    return -TARGET_EFAULT;
9271                host_to_target_sigset(p, &oldset);
9272                unlock_user(p, arg3, sizeof(target_sigset_t));
9273            }
9274        }
9275        return ret;
9276#ifdef TARGET_NR_sigpending
9277    case TARGET_NR_sigpending:
9278        {
9279            sigset_t set;
9280            ret = get_errno(sigpending(&set));
9281            if (!is_error(ret)) {
9282                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9283                    return -TARGET_EFAULT;
9284                host_to_target_old_sigset(p, &set);
9285                unlock_user(p, arg1, sizeof(target_sigset_t));
9286            }
9287        }
9288        return ret;
9289#endif
9290    case TARGET_NR_rt_sigpending:
9291        {
9292            sigset_t set;
9293
9294            /* Yes, this check is >, not != like most. We follow the kernel's
9295             * logic and it does it like this because it implements
9296             * NR_sigpending through the same code path, and in that case
9297             * the old_sigset_t is smaller in size.
9298             */
9299            if (arg2 > sizeof(target_sigset_t)) {
9300                return -TARGET_EINVAL;
9301            }
9302
9303            ret = get_errno(sigpending(&set));
9304            if (!is_error(ret)) {
9305                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9306                    return -TARGET_EFAULT;
9307                host_to_target_sigset(p, &set);
9308                unlock_user(p, arg1, sizeof(target_sigset_t));
9309            }
9310        }
9311        return ret;
9312#ifdef TARGET_NR_sigsuspend
9313    case TARGET_NR_sigsuspend:
9314        {
9315            TaskState *ts = cpu->opaque;
9316#if defined(TARGET_ALPHA)
9317            abi_ulong mask = arg1;
9318            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9319#else
9320            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9321                return -TARGET_EFAULT;
9322            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9323            unlock_user(p, arg1, 0);
9324#endif
9325            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9326                                               SIGSET_T_SIZE));
9327            if (ret != -TARGET_ERESTARTSYS) {
9328                ts->in_sigsuspend = 1;
9329            }
9330        }
9331        return ret;
9332#endif
9333    case TARGET_NR_rt_sigsuspend:
9334        {
9335            TaskState *ts = cpu->opaque;
9336
9337            if (arg2 != sizeof(target_sigset_t)) {
9338                return -TARGET_EINVAL;
9339            }
9340            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9341                return -TARGET_EFAULT;
9342            target_to_host_sigset(&ts->sigsuspend_mask, p);
9343            unlock_user(p, arg1, 0);
9344            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9345                                               SIGSET_T_SIZE));
9346            if (ret != -TARGET_ERESTARTSYS) {
9347                ts->in_sigsuspend = 1;
9348            }
9349        }
9350        return ret;
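            /*
             * rt_sigtimedwait below follows the usual shape: copy the sigset
             * (and optional timespec) in, call the safe_ wrapper so pending
             * guest signals can interrupt the wait, then copy the siginfo
             * back out.  Note that a successful return value is a host
             * signal number and must go through host_to_target_signal()
             * before being handed back to the guest.
             */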
9351#ifdef TARGET_NR_rt_sigtimedwait
9352    case TARGET_NR_rt_sigtimedwait:
9353        {
9354            sigset_t set;
9355            struct timespec uts, *puts;
9356            siginfo_t uinfo;
9357
9358            if (arg4 != sizeof(target_sigset_t)) {
9359                return -TARGET_EINVAL;
9360            }
9361
9362            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9363                return -TARGET_EFAULT;
9364            target_to_host_sigset(&set, p);
9365            unlock_user(p, arg1, 0);
9366            if (arg3) {
9367                puts = &uts;
9368                if (target_to_host_timespec(puts, arg3)) {
9369                    return -TARGET_EFAULT;
9370                }
9371            } else {
9372                puts = NULL;
9373            }
9374            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9375                                                 SIGSET_T_SIZE));
9376            if (!is_error(ret)) {
9377                if (arg2) {
9378                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9379                                  0);
9380                    if (!p) {
9381                        return -TARGET_EFAULT;
9382                    }
9383                    host_to_target_siginfo(p, &uinfo);
9384                    unlock_user(p, arg2, sizeof(target_siginfo_t));
9385                }
9386                ret = host_to_target_signal(ret);
9387            }
9388        }
9389        return ret;
9390#endif
9391#ifdef TARGET_NR_rt_sigtimedwait_time64
9392    case TARGET_NR_rt_sigtimedwait_time64:
9393        {
9394            sigset_t set;
9395            struct timespec uts, *puts;
9396            siginfo_t uinfo;
9397
9398            if (arg4 != sizeof(target_sigset_t)) {
9399                return -TARGET_EINVAL;
9400            }
9401
9402            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9403            if (!p) {
9404                return -TARGET_EFAULT;
9405            }
9406            target_to_host_sigset(&set, p);
9407            unlock_user(p, arg1, 0);
9408            if (arg3) {
9409                puts = &uts;
9410                if (target_to_host_timespec64(puts, arg3)) {
9411                    return -TARGET_EFAULT;
9412                }
9413            } else {
9414                puts = NULL;
9415            }
9416            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9417                                                 SIGSET_T_SIZE));
9418            if (!is_error(ret)) {
9419                if (arg2) {
9420                    p = lock_user(VERIFY_WRITE, arg2,
9421                                  sizeof(target_siginfo_t), 0);
9422                    if (!p) {
9423                        return -TARGET_EFAULT;
9424                    }
9425                    host_to_target_siginfo(p, &uinfo);
9426                    unlock_user(p, arg2, sizeof(target_siginfo_t));
9427                }
9428                ret = host_to_target_signal(ret);
9429            }
9430        }
9431        return ret;
9432#endif
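            /*
             * For rt_sigqueueinfo/rt_tgsigqueueinfo the siginfo_t is
             * converted from the target layout to the host layout with
             * target_to_host_siginfo() and queued via the sys_* wrappers;
             * the pid/tid/signal arguments are passed through unchanged.
             */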
9433    case TARGET_NR_rt_sigqueueinfo:
9434        {
9435            siginfo_t uinfo;
9436
9437            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9438            if (!p) {
9439                return -TARGET_EFAULT;
9440            }
9441            target_to_host_siginfo(&uinfo, p);
9442            unlock_user(p, arg3, 0);
9443            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9444        }
9445        return ret;
9446    case TARGET_NR_rt_tgsigqueueinfo:
9447        {
9448            siginfo_t uinfo;
9449
9450            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9451            if (!p) {
9452                return -TARGET_EFAULT;
9453            }
9454            target_to_host_siginfo(&uinfo, p);
9455            unlock_user(p, arg4, 0);
9456            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9457        }
9458        return ret;
9459#ifdef TARGET_NR_sigreturn
9460    case TARGET_NR_sigreturn:
9461        if (block_signals()) {
9462            return -TARGET_ERESTARTSYS;
9463        }
9464        return do_sigreturn(cpu_env);
9465#endif
9466    case TARGET_NR_rt_sigreturn:
9467        if (block_signals()) {
9468            return -TARGET_ERESTARTSYS;
9469        }
9470        return do_rt_sigreturn(cpu_env);
9471    case TARGET_NR_sethostname:
9472        if (!(p = lock_user_string(arg1)))
9473            return -TARGET_EFAULT;
9474        ret = get_errno(sethostname(p, arg2));
9475        unlock_user(p, arg1, 0);
9476        return ret;
9477#ifdef TARGET_NR_setrlimit
9478    case TARGET_NR_setrlimit:
9479        {
9480            int resource = target_to_host_resource(arg1);
9481            struct target_rlimit *target_rlim;
9482            struct rlimit rlim;
9483            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9484                return -TARGET_EFAULT;
9485            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9486            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9487            unlock_user_struct(target_rlim, arg2, 0);
9488            /*
9489             * If we just passed through resource limit settings for memory then
9490             * they would also apply to QEMU's own allocations, and QEMU will
9491             * crash or hang or die if its allocations fail. Ideally we would
9492             * track the guest allocations in QEMU and apply the limits ourselves.
9493             * For now, just tell the guest the call succeeded but don't actually
9494             * limit anything.
9495             */
9496            if (resource != RLIMIT_AS &&
9497                resource != RLIMIT_DATA &&
9498                resource != RLIMIT_STACK) {
9499                return get_errno(setrlimit(resource, &rlim));
9500            } else {
9501                return 0;
9502            }
9503        }
9504#endif
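            /*
             * Illustration of the setrlimit special case above: a guest
             * calling setrlimit(RLIMIT_AS, &lim) is told the call succeeded,
             * but no host limit is installed, so allocations QEMU makes on
             * the guest's behalf keep working.
             */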
9505#ifdef TARGET_NR_getrlimit
9506    case TARGET_NR_getrlimit:
9507        {
9508            int resource = target_to_host_resource(arg1);
9509            struct target_rlimit *target_rlim;
9510            struct rlimit rlim;
9511
9512            ret = get_errno(getrlimit(resource, &rlim));
9513            if (!is_error(ret)) {
9514                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9515                    return -TARGET_EFAULT;
9516                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9517                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9518                unlock_user_struct(target_rlim, arg2, 1);
9519            }
9520        }
9521        return ret;
9522#endif
9523    case TARGET_NR_getrusage:
9524        {
9525            struct rusage rusage;
9526            ret = get_errno(getrusage(arg1, &rusage));
9527            if (!is_error(ret)) {
9528                ret = host_to_target_rusage(arg2, &rusage);
9529            }
9530        }
9531        return ret;
9532#if defined(TARGET_NR_gettimeofday)
9533    case TARGET_NR_gettimeofday:
9534        {
9535            struct timeval tv;
9536            struct timezone tz;
9537
9538            ret = get_errno(gettimeofday(&tv, &tz));
9539            if (!is_error(ret)) {
9540                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9541                    return -TARGET_EFAULT;
9542                }
9543                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9544                    return -TARGET_EFAULT;
9545                }
9546            }
9547        }
9548        return ret;
9549#endif
9550#if defined(TARGET_NR_settimeofday)
9551    case TARGET_NR_settimeofday:
9552        {
9553            struct timeval tv, *ptv = NULL;
9554            struct timezone tz, *ptz = NULL;
9555
9556            if (arg1) {
9557                if (copy_from_user_timeval(&tv, arg1)) {
9558                    return -TARGET_EFAULT;
9559                }
9560                ptv = &tv;
9561            }
9562
9563            if (arg2) {
9564                if (copy_from_user_timezone(&tz, arg2)) {
9565                    return -TARGET_EFAULT;
9566                }
9567                ptz = &tz;
9568            }
9569
9570            return get_errno(settimeofday(ptv, ptz));
9571        }
9572#endif
9573#if defined(TARGET_NR_select)
9574    case TARGET_NR_select:
9575#if defined(TARGET_WANT_NI_OLD_SELECT)
9576        /* some architectures used to have old_select here
9577         * but now return ENOSYS for it.
9578         */
9579        ret = -TARGET_ENOSYS;
9580#elif defined(TARGET_WANT_OLD_SYS_SELECT)
9581        ret = do_old_select(arg1);
9582#else
9583        ret = do_select(arg1, arg2, arg3, arg4, arg5);
9584#endif
9585        return ret;
9586#endif
9587#ifdef TARGET_NR_pselect6
9588    case TARGET_NR_pselect6:
9589        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9590#endif
9591#ifdef TARGET_NR_pselect6_time64
9592    case TARGET_NR_pselect6_time64:
9593        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9594#endif
9595#ifdef TARGET_NR_symlink
9596    case TARGET_NR_symlink:
9597        {
9598            void *p2;
9599            p = lock_user_string(arg1);
9600            p2 = lock_user_string(arg2);
9601            if (!p || !p2)
9602                ret = -TARGET_EFAULT;
9603            else
9604                ret = get_errno(symlink(p, p2));
9605            unlock_user(p2, arg2, 0);
9606            unlock_user(p, arg1, 0);
9607        }
9608        return ret;
9609#endif
9610#if defined(TARGET_NR_symlinkat)
9611    case TARGET_NR_symlinkat:
9612        {
9613            void *p2;
9614            p  = lock_user_string(arg1);
9615            p2 = lock_user_string(arg3);
9616            if (!p || !p2)
9617                ret = -TARGET_EFAULT;
9618            else
9619                ret = get_errno(symlinkat(p, arg2, p2));
9620            unlock_user(p2, arg3, 0);
9621            unlock_user(p, arg1, 0);
9622        }
9623        return ret;
9624#endif
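            /*
             * readlink/readlinkat special-case /proc/self/exe (and the
             * /proc/<own-pid>/exe form, via is_proc_myself()): the guest is
             * given the path of the emulated binary (exec_path) rather than
             * the QEMU executable that the host kernel would report.
             */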
9625#ifdef TARGET_NR_readlink
9626    case TARGET_NR_readlink:
9627        {
9628            void *p2;
9629            p = lock_user_string(arg1);
9630            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9631            if (!p || !p2) {
9632                ret = -TARGET_EFAULT;
9633            } else if (!arg3) {
9634                /* Short circuit this for the magic exe check. */
9635                ret = -TARGET_EINVAL;
9636            } else if (is_proc_myself((const char *)p, "exe")) {
9637                char real[PATH_MAX], *temp;
9638                temp = realpath(exec_path, real);
9639                /* Return value is # of bytes that we wrote to the buffer. */
9640                if (temp == NULL) {
9641                    ret = get_errno(-1);
9642                } else {
9643                    /* Don't worry about sign mismatch as earlier mapping
9644                     * logic would have thrown a bad address error. */
9645                    ret = MIN(strlen(real), arg3);
9646                    /* We cannot NUL terminate the string. */
9647                    memcpy(p2, real, ret);
9648                }
9649            } else {
9650                ret = get_errno(readlink(path(p), p2, arg3));
9651            }
9652            unlock_user(p2, arg2, ret);
9653            unlock_user(p, arg1, 0);
9654        }
9655        return ret;
9656#endif
9657#if defined(TARGET_NR_readlinkat)
9658    case TARGET_NR_readlinkat:
9659        {
9660            void *p2;
9661            p  = lock_user_string(arg2);
9662            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9663            if (!p || !p2) {
9664                ret = -TARGET_EFAULT;
9665            } else if (is_proc_myself((const char *)p, "exe")) {
9666                char real[PATH_MAX], *temp;
9667                temp = realpath(exec_path, real);
9668                ret = temp == NULL ? get_errno(-1) : strlen(real);
9669                if (temp) { snprintf((char *)p2, arg4, "%s", real); }
9670            } else {
9671                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9672            }
9673            unlock_user(p2, arg3, ret);
9674            unlock_user(p, arg2, 0);
9675        }
9676        return ret;
9677#endif
9678#ifdef TARGET_NR_swapon
9679    case TARGET_NR_swapon:
9680        if (!(p = lock_user_string(arg1)))
9681            return -TARGET_EFAULT;
9682        ret = get_errno(swapon(p, arg2));
9683        unlock_user(p, arg1, 0);
9684        return ret;
9685#endif
9686    case TARGET_NR_reboot:
9687        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9688            /* arg4 is only used with RESTART2; ignore it in all other cases */
9689            p = lock_user_string(arg4);
9690            if (!p) {
9691                return -TARGET_EFAULT;
9692            }
9693            ret = get_errno(reboot(arg1, arg2, arg3, p));
9694            unlock_user(p, arg4, 0);
9695        } else {
9696            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9697        }
9698        return ret;
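            /*
             * Old-ABI mmap: on the 32-bit targets listed below the original
             * mmap syscall takes a single pointer to a block of six
             * arguments in guest memory, so those are fetched and
             * byte-swapped first; other targets pass the six arguments in
             * registers as usual.
             */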
9699#ifdef TARGET_NR_mmap
9700    case TARGET_NR_mmap:
9701#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9702    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9703    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9704    || defined(TARGET_S390X)
9705        {
9706            abi_ulong *v;
9707            abi_ulong v1, v2, v3, v4, v5, v6;
9708            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9709                return -TARGET_EFAULT;
9710            v1 = tswapal(v[0]);
9711            v2 = tswapal(v[1]);
9712            v3 = tswapal(v[2]);
9713            v4 = tswapal(v[3]);
9714            v5 = tswapal(v[4]);
9715            v6 = tswapal(v[5]);
9716            unlock_user(v, arg1, 0);
9717            ret = get_errno(target_mmap(v1, v2, v3,
9718                                        target_to_host_bitmask(v4, mmap_flags_tbl),
9719                                        v5, v6));
9720        }
9721#else
9722        /* mmap pointers are always untagged */
9723        ret = get_errno(target_mmap(arg1, arg2, arg3,
9724                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
9725                                    arg5,
9726                                    arg6));
9727#endif
9728        return ret;
9729#endif
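            /*
             * mmap2 passes the file offset in units of 1 << MMAP_SHIFT
             * (4096 bytes unless the target overrides it), which lets
             * 32-bit guests map file offsets beyond 4 GiB.
             */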
9730#ifdef TARGET_NR_mmap2
9731    case TARGET_NR_mmap2:
9732#ifndef MMAP_SHIFT
9733#define MMAP_SHIFT 12
9734#endif
9735        ret = target_mmap(arg1, arg2, arg3,
9736                          target_to_host_bitmask(arg4, mmap_flags_tbl),
9737                          arg5, arg6 << MMAP_SHIFT);
9738        return get_errno(ret);
9739#endif
9740    case TARGET_NR_munmap:
9741        arg1 = cpu_untagged_addr(cpu, arg1);
9742        return get_errno(target_munmap(arg1, arg2));
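            /*
             * The PROT_GROWSDOWN handling below covers guest libc trying to
             * change stack permissions by calling mprotect() with
             * PROT_GROWSDOWN on the lowest stack page; the guest stack set
             * up by QEMU is presumably not a host MAP_GROWSDOWN mapping, so
             * the flag is dropped and the range widened to start at the
             * stack limit QEMU knows about.
             */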
9743    case TARGET_NR_mprotect:
9744        arg1 = cpu_untagged_addr(cpu, arg1);
9745        {
9746            TaskState *ts = cpu->opaque;
9747            /* Special hack to detect libc making the stack executable.  */
9748            if ((arg3 & PROT_GROWSDOWN)
9749                && arg1 >= ts->info->stack_limit
9750                && arg1 <= ts->info->start_stack) {
9751                arg3 &= ~PROT_GROWSDOWN;
9752                arg2 = arg2 + arg1 - ts->info->stack_limit;
9753                arg1 = ts->info->stack_limit;
9754            }
9755        }
9756        return get_errno(target_mprotect(arg1, arg2, arg3));
9757#ifdef TARGET_NR_mremap
9758    case TARGET_NR_mremap:
9759        arg1 = cpu_untagged_addr(cpu, arg1);
9760        /* mremap new_addr (arg5) is always untagged */
9761        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9762#endif
9763        /* ??? msync/mlock/munlock are broken for softmmu.  */
9764#ifdef TARGET_NR_msync
9765    case TARGET_NR_msync:
9766        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9767#endif
9768#ifdef TARGET_NR_mlock
9769    case TARGET_NR_mlock:
9770        return get_errno(mlock(g2h(cpu, arg1), arg2));
9771#endif
9772#ifdef TARGET_NR_munlock
9773    case TARGET_NR_munlock:
9774        return get_errno(munlock(g2h(cpu, arg1), arg2));
9775#endif
9776#ifdef TARGET_NR_mlockall
9777    case TARGET_NR_mlockall:
9778        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9779#endif
9780#ifdef TARGET_NR_munlockall
9781    case TARGET_NR_munlockall:
9782        return get_errno(munlockall());
9783#endif
9784#ifdef TARGET_NR_truncate
9785    case TARGET_NR_truncate:
9786        if (!(p = lock_user_string(arg1)))
9787            return -TARGET_EFAULT;
9788        ret = get_errno(truncate(p, arg2));
9789        unlock_user(p, arg1, 0);
9790        return ret;
9791#endif
9792#ifdef TARGET_NR_ftruncate
9793    case TARGET_NR_ftruncate:
9794        return get_errno(ftruncate(arg1, arg2));
9795#endif
9796    case TARGET_NR_fchmod:
9797        return get_errno(fchmod(arg1, arg2));
9798#if defined(TARGET_NR_fchmodat)
9799    case TARGET_NR_fchmodat:
9800        if (!(p = lock_user_string(arg2)))
9801            return -TARGET_EFAULT;
9802        ret = get_errno(fchmodat(arg1, p, arg3, 0));
9803        unlock_user(p, arg2, 0);
9804        return ret;
9805#endif
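            /*
             * getpriority example: the raw Linux syscall reports the nice
             * value biased as 20 - nice so that the result is never
             * negative (nice -5 comes back as 25, nice 10 as 10), and guest
             * libc is expected to undo the bias; Alpha is the exception, as
             * noted below.
             */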
9806    case TARGET_NR_getpriority:
9807        /* Note that negative values are valid for getpriority, so we must
9808           differentiate based on errno settings.  */
9809        errno = 0;
9810        ret = getpriority(arg1, arg2);
9811        if (ret == -1 && errno != 0) {
9812            return -host_to_target_errno(errno);
9813        }
9814#ifdef TARGET_ALPHA
9815        /* Return value is the unbiased priority.  Signal no error.  */
9816        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9817#else
9818        /* Return value is a biased priority to avoid negative numbers.  */
9819        ret = 20 - ret;
9820#endif
9821        return ret;
9822    case TARGET_NR_setpriority:
9823        return get_errno(setpriority(arg1, arg2, arg3));
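            /*
             * statfs/fstatfs and their 64-bit variants share their result
             * conversion via the convert_statfs/convert_statfs64 labels:
             * each field of the host struct statfs is stored with
             * __put_user so it gets the target's size and byte order.
             */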
9824#ifdef TARGET_NR_statfs
9825    case TARGET_NR_statfs:
9826        if (!(p = lock_user_string(arg1))) {
9827            return -TARGET_EFAULT;
9828        }
9829        ret = get_errno(statfs(path(p), &stfs));
9830        unlock_user(p, arg1, 0);
9831    convert_statfs:
9832        if (!is_error(ret)) {
9833            struct target_statfs *target_stfs;
9834
9835            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9836                return -TARGET_EFAULT;
9837            __put_user(stfs.f_type, &target_stfs->f_type);
9838            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9839            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9840            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9841            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9842            __put_user(stfs.f_files, &target_stfs->f_files);
9843            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9844            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9845            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9846            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9847            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9848#ifdef _STATFS_F_FLAGS
9849            __put_user(stfs.f_flags, &target_stfs->f_flags);
9850#else
9851            __put_user(0, &target_stfs->f_flags);
9852#endif
9853            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9854            unlock_user_struct(target_stfs, arg2, 1);
9855        }
9856        return ret;
9857#endif
9858#ifdef TARGET_NR_fstatfs
9859    case TARGET_NR_fstatfs:
9860        ret = get_errno(fstatfs(arg1, &stfs));
9861        goto convert_statfs;
9862#endif
9863#ifdef TARGET_NR_statfs64
9864    case TARGET_NR_statfs64:
9865        if (!(p = lock_user_string(arg1))) {
9866            return -TARGET_EFAULT;
9867        }
9868        ret = get_errno(statfs(path(p), &stfs));
9869        unlock_user(p, arg1, 0);
9870    convert_statfs64:
9871        if (!is_error(ret)) {
9872            struct target_statfs64 *target_stfs;
9873
9874            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9875                return -TARGET_EFAULT;
9876            __put_user(stfs.f_type, &target_stfs->f_type);
9877            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9878            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9879            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9880            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9881            __put_user(stfs.f_files, &target_stfs->f_files);
9882            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9883            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9884            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9885            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9886            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9887#ifdef _STATFS_F_FLAGS
9888            __put_user(stfs.f_flags, &target_stfs->f_flags);
9889#else
9890            __put_user(0, &target_stfs->f_flags);
9891#endif
9892            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9893            unlock_user_struct(target_stfs, arg3, 1);
9894        }
9895        return ret;
9896    case TARGET_NR_fstatfs64:
9897        ret = get_errno(fstatfs(arg1, &stfs));
9898        goto convert_statfs64;
9899#endif
9900#ifdef TARGET_NR_socketcall
9901    case TARGET_NR_socketcall:
9902        return do_socketcall(arg1, arg2);
9903#endif
9904#ifdef TARGET_NR_accept
9905    case TARGET_NR_accept:
9906        return do_accept4(arg1, arg2, arg3, 0);
9907#endif
9908#ifdef TARGET_NR_accept4
9909    case TARGET_NR_accept4:
9910        return do_accept4(arg1, arg2, arg3, arg4);
9911#endif
9912#ifdef TARGET_NR_bind
9913    case TARGET_NR_bind:
9914        return do_bind(arg1, arg2, arg3);
9915#endif
9916#ifdef TARGET_NR_connect
9917    case TARGET_NR_connect:
9918        return do_connect(arg1, arg2, arg3);
9919#endif
9920#ifdef TARGET_NR_getpeername
9921    case TARGET_NR_getpeername:
9922        return do_getpeername(arg1, arg2, arg3);
9923#endif
9924#ifdef TARGET_NR_getsockname
9925    case TARGET_NR_getsockname:
9926        return do_getsockname(arg1, arg2, arg3);
9927#endif
9928#ifdef TARGET_NR_getsockopt
9929    case TARGET_NR_getsockopt:
9930        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9931#endif
9932#ifdef TARGET_NR_listen
9933    case TARGET_NR_listen:
9934        return get_errno(listen(arg1, arg2));
9935#endif
9936#ifdef TARGET_NR_recv
9937    case TARGET_NR_recv:
9938        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9939#endif
9940#ifdef TARGET_NR_recvfrom
9941    case TARGET_NR_recvfrom:
9942        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9943#endif
9944#ifdef TARGET_NR_recvmsg
9945    case TARGET_NR_recvmsg:
9946        return do_sendrecvmsg(arg1, arg2, arg3, 0);
9947#endif
9948#ifdef TARGET_NR_send
9949    case TARGET_NR_send:
9950        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9951#endif
9952#ifdef TARGET_NR_sendmsg
9953    case TARGET_NR_sendmsg:
9954        return do_sendrecvmsg(arg1, arg2, arg3, 1);
9955#endif
9956#ifdef TARGET_NR_sendmmsg
9957    case TARGET_NR_sendmmsg:
9958        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9959#endif
9960#ifdef TARGET_NR_recvmmsg
9961    case TARGET_NR_recvmmsg:
9962        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9963#endif
9964#ifdef TARGET_NR_sendto
9965    case TARGET_NR_sendto:
9966        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9967#endif
9968#ifdef TARGET_NR_shutdown
9969    case TARGET_NR_shutdown:
9970        return get_errno(shutdown(arg1, arg2));
9971#endif
9972#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9973    case TARGET_NR_getrandom:
9974        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9975        if (!p) {
9976            return -TARGET_EFAULT;
9977        }
9978        ret = get_errno(getrandom(p, arg2, arg3));
9979        unlock_user(p, arg1, ret);
9980        return ret;
9981#endif
9982#ifdef TARGET_NR_socket
9983    case TARGET_NR_socket:
9984        return do_socket(arg1, arg2, arg3);
9985#endif
9986#ifdef TARGET_NR_socketpair
9987    case TARGET_NR_socketpair:
9988        return do_socketpair(arg1, arg2, arg3, arg4);
9989#endif
9990#ifdef TARGET_NR_setsockopt
9991    case TARGET_NR_setsockopt:
9992        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9993#endif
9994#if defined(TARGET_NR_syslog)
9995    case TARGET_NR_syslog:
9996        {
9997            int len = arg3;
9998
9999            switch (arg1) {
10000            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10001            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10002            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10003            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10004            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10005            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10006            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10007            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10008                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10009            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10010            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10011            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10012                {
10013                    if (len < 0) {
10014                        return -TARGET_EINVAL;
10015                    }
10016                    if (len == 0) {
10017                        return 0;
10018                    }
10019                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10020                    if (!p) {
10021                        return -TARGET_EFAULT;
10022                    }
10023                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10024                    unlock_user(p, arg2, arg3);
10025                }
10026                return ret;
10027            default:
10028                return -TARGET_EINVAL;
10029            }
10030        }
10031        break;
10032#endif
10033    case TARGET_NR_setitimer:
10034        {
10035            struct itimerval value, ovalue, *pvalue;
10036
10037            if (arg2) {
10038                pvalue = &value;
10039                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10040                    || copy_from_user_timeval(&pvalue->it_value,
10041                                              arg2 + sizeof(struct target_timeval)))
10042                    return -TARGET_EFAULT;
10043            } else {
10044                pvalue = NULL;
10045            }
10046            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10047            if (!is_error(ret) && arg3) {
10048                if (copy_to_user_timeval(arg3,
10049                                         &ovalue.it_interval)
10050                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10051                                            &ovalue.it_value))
10052                    return -TARGET_EFAULT;
10053            }
10054        }
10055        return ret;
10056    case TARGET_NR_getitimer:
10057        {
10058            struct itimerval value;
10059
10060            ret = get_errno(getitimer(arg1, &value));
10061            if (!is_error(ret) && arg2) {
10062                if (copy_to_user_timeval(arg2,
10063                                         &value.it_interval)
10064                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10065                                            &value.it_value))
10066                    return -TARGET_EFAULT;
10067            }
10068        }
10069        return ret;
10070#ifdef TARGET_NR_stat
10071    case TARGET_NR_stat:
10072        if (!(p = lock_user_string(arg1))) {
10073            return -TARGET_EFAULT;
10074        }
10075        ret = get_errno(stat(path(p), &st));
10076        unlock_user(p, arg1, 0);
10077        goto do_stat;
10078#endif
10079#ifdef TARGET_NR_lstat
10080    case TARGET_NR_lstat:
10081        if (!(p = lock_user_string(arg1))) {
10082            return -TARGET_EFAULT;
10083        }
10084        ret = get_errno(lstat(path(p), &st));
10085        unlock_user(p, arg1, 0);
10086        goto do_stat;
10087#endif
10088#ifdef TARGET_NR_fstat
10089    case TARGET_NR_fstat:
10090        {
10091            ret = get_errno(fstat(arg1, &st));
10092#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10093        do_stat:
10094#endif
10095            if (!is_error(ret)) {
10096                struct target_stat *target_st;
10097
10098                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10099                    return -TARGET_EFAULT;
10100                memset(target_st, 0, sizeof(*target_st));
10101                __put_user(st.st_dev, &target_st->st_dev);
10102                __put_user(st.st_ino, &target_st->st_ino);
10103                __put_user(st.st_mode, &target_st->st_mode);
10104                __put_user(st.st_uid, &target_st->st_uid);
10105                __put_user(st.st_gid, &target_st->st_gid);
10106                __put_user(st.st_nlink, &target_st->st_nlink);
10107                __put_user(st.st_rdev, &target_st->st_rdev);
10108                __put_user(st.st_size, &target_st->st_size);
10109                __put_user(st.st_blksize, &target_st->st_blksize);
10110                __put_user(st.st_blocks, &target_st->st_blocks);
10111                __put_user(st.st_atime, &target_st->target_st_atime);
10112                __put_user(st.st_mtime, &target_st->target_st_mtime);
10113                __put_user(st.st_ctime, &target_st->target_st_ctime);
10114#if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10115    defined(TARGET_STAT_HAVE_NSEC)
10116                __put_user(st.st_atim.tv_nsec,
10117                           &target_st->target_st_atime_nsec);
10118                __put_user(st.st_mtim.tv_nsec,
10119                           &target_st->target_st_mtime_nsec);
10120                __put_user(st.st_ctim.tv_nsec,
10121                           &target_st->target_st_ctime_nsec);
10122#endif
10123                unlock_user_struct(target_st, arg2, 1);
10124            }
10125        }
10126        return ret;
10127#endif
10128    case TARGET_NR_vhangup:
10129        return get_errno(vhangup());
10130#ifdef TARGET_NR_syscall
10131    case TARGET_NR_syscall:
10132        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10133                          arg6, arg7, arg8, 0);
10134#endif
10135#if defined(TARGET_NR_wait4)
10136    case TARGET_NR_wait4:
10137        {
10138            int status;
10139            abi_long status_ptr = arg2;
10140            struct rusage rusage, *rusage_ptr;
10141            abi_ulong target_rusage = arg4;
10142            abi_long rusage_err;
10143            if (target_rusage)
10144                rusage_ptr = &rusage;
10145            else
10146                rusage_ptr = NULL;
10147            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10148            if (!is_error(ret)) {
10149                if (status_ptr && ret) {
10150                    status = host_to_target_waitstatus(status);
10151                    if (put_user_s32(status, status_ptr))
10152                        return -TARGET_EFAULT;
10153                }
10154                if (target_rusage) {
10155                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
10156                    if (rusage_err) {
10157                        ret = rusage_err;
10158                    }
10159                }
10160            }
10161        }
10162        return ret;
10163#endif
10164#ifdef TARGET_NR_swapoff
10165    case TARGET_NR_swapoff:
10166        if (!(p = lock_user_string(arg1)))
10167            return -TARGET_EFAULT;
10168        ret = get_errno(swapoff(p));
10169        unlock_user(p, arg1, 0);
10170        return ret;
10171#endif
10172    case TARGET_NR_sysinfo:
10173        {
10174            struct target_sysinfo *target_value;
10175            struct sysinfo value;
10176            ret = get_errno(sysinfo(&value));
10177            if (!is_error(ret) && arg1)
10178            {
10179                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10180                    return -TARGET_EFAULT;
10181                __put_user(value.uptime, &target_value->uptime);
10182                __put_user(value.loads[0], &target_value->loads[0]);
10183                __put_user(value.loads[1], &target_value->loads[1]);
10184                __put_user(value.loads[2], &target_value->loads[2]);
10185                __put_user(value.totalram, &target_value->totalram);
10186                __put_user(value.freeram, &target_value->freeram);
10187                __put_user(value.sharedram, &target_value->sharedram);
10188                __put_user(value.bufferram, &target_value->bufferram);
10189                __put_user(value.totalswap, &target_value->totalswap);
10190                __put_user(value.freeswap, &target_value->freeswap);
10191                __put_user(value.procs, &target_value->procs);
10192                __put_user(value.totalhigh, &target_value->totalhigh);
10193                __put_user(value.freehigh, &target_value->freehigh);
10194                __put_user(value.mem_unit, &target_value->mem_unit);
10195                unlock_user_struct(target_value, arg1, 1);
10196            }
10197        }
10198        return ret;
10199#ifdef TARGET_NR_ipc
10200    case TARGET_NR_ipc:
10201        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10202#endif
10203#ifdef TARGET_NR_semget
10204    case TARGET_NR_semget:
10205        return get_errno(semget(arg1, arg2, arg3));
10206#endif
10207#ifdef TARGET_NR_semop
10208    case TARGET_NR_semop:
10209        return do_semtimedop(arg1, arg2, arg3, 0, false);
10210#endif
10211#ifdef TARGET_NR_semtimedop
10212    case TARGET_NR_semtimedop:
10213        return do_semtimedop(arg1, arg2, arg3, arg4, false);
10214#endif
10215#ifdef TARGET_NR_semtimedop_time64
10216    case TARGET_NR_semtimedop_time64:
10217        return do_semtimedop(arg1, arg2, arg3, arg4, true);
10218#endif
10219#ifdef TARGET_NR_semctl
10220    case TARGET_NR_semctl:
10221        return do_semctl(arg1, arg2, arg3, arg4);
10222#endif
10223#ifdef TARGET_NR_msgctl
10224    case TARGET_NR_msgctl:
10225        return do_msgctl(arg1, arg2, arg3);
10226#endif
10227#ifdef TARGET_NR_msgget
10228    case TARGET_NR_msgget:
10229        return get_errno(msgget(arg1, arg2));
10230#endif
10231#ifdef TARGET_NR_msgrcv
10232    case TARGET_NR_msgrcv:
10233        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10234#endif
10235#ifdef TARGET_NR_msgsnd
10236    case TARGET_NR_msgsnd:
10237        return do_msgsnd(arg1, arg2, arg3, arg4);
10238#endif
10239#ifdef TARGET_NR_shmget
10240    case TARGET_NR_shmget:
10241        return get_errno(shmget(arg1, arg2, arg3));
10242#endif
10243#ifdef TARGET_NR_shmctl
10244    case TARGET_NR_shmctl:
10245        return do_shmctl(arg1, arg2, arg3);
10246#endif
10247#ifdef TARGET_NR_shmat
10248    case TARGET_NR_shmat:
10249        return do_shmat(cpu_env, arg1, arg2, arg3);
10250#endif
10251#ifdef TARGET_NR_shmdt
10252    case TARGET_NR_shmdt:
10253        return do_shmdt(arg1);
10254#endif
10255    case TARGET_NR_fsync:
10256        return get_errno(fsync(arg1));
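             /*
              * For reference, the orderings handled below are roughly:
              *   default:    clone(flags, newsp, parent_tidptr, child_tidptr, tls)
              *   BACKWARDS:  clone(flags, newsp, parent_tidptr, tls, child_tidptr)
              *   BACKWARDS2: clone(newsp, flags, parent_tidptr, child_tidptr, tls)
              * do_fork() takes (env, flags, newsp, parent_tidptr, tls,
              * child_tidptr), hence the shuffled argument positions.
              */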
10257    case TARGET_NR_clone:
10258        /* Linux manages to have three different orderings for its
10259         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10260         * match the kernel's CONFIG_CLONE_* settings.
10261         * Microblaze is further special in that it uses a sixth
10262         * implicit argument to clone for the TLS pointer.
10263         */
10264#if defined(TARGET_MICROBLAZE)
10265        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10266#elif defined(TARGET_CLONE_BACKWARDS)
10267        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10268#elif defined(TARGET_CLONE_BACKWARDS2)
10269        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10270#else
10271        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10272#endif
10273        return ret;
10274#ifdef __NR_exit_group
10275        /* new thread calls */
10276    case TARGET_NR_exit_group:
10277        preexit_cleanup(cpu_env, arg1);
10278        return get_errno(exit_group(arg1));
10279#endif
10280    case TARGET_NR_setdomainname:
10281        if (!(p = lock_user_string(arg1)))
10282            return -TARGET_EFAULT;
10283        ret = get_errno(setdomainname(p, arg2));
10284        unlock_user(p, arg1, 0);
10285        return ret;
10286    case TARGET_NR_uname:
10287        /* no need to transcode because we use the linux syscall */
10288        {
10289            struct new_utsname * buf;
10290
10291            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10292                return -TARGET_EFAULT;
10293            ret = get_errno(sys_uname(buf));
10294            if (!is_error(ret)) {
10295                /* Overwrite the native machine name with whatever is being
10296                   emulated. */
10297                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10298                          sizeof(buf->machine));
10299                /* Allow the user to override the reported release.  */
10300                if (qemu_uname_release && *qemu_uname_release) {
10301                    g_strlcpy(buf->release, qemu_uname_release,
10302                              sizeof(buf->release));
10303                }
10304            }
10305            unlock_user_struct(buf, arg1, 1);
10306        }
10307        return ret;
10308#ifdef TARGET_I386
10309    case TARGET_NR_modify_ldt:
10310        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10311#if !defined(TARGET_X86_64)
10312    case TARGET_NR_vm86:
10313        return do_vm86(cpu_env, arg1, arg2);
10314#endif
10315#endif
10316#if defined(TARGET_NR_adjtimex)
10317    case TARGET_NR_adjtimex:
10318        {
10319            struct timex host_buf;
10320
10321            if (target_to_host_timex(&host_buf, arg1) != 0) {
10322                return -TARGET_EFAULT;
10323            }
10324            ret = get_errno(adjtimex(&host_buf));
10325            if (!is_error(ret)) {
10326                if (host_to_target_timex(arg1, &host_buf) != 0) {
10327                    return -TARGET_EFAULT;
10328                }
10329            }
10330        }
10331        return ret;
10332#endif
10333#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10334    case TARGET_NR_clock_adjtime:
10335        {
10336            struct timex htx, *phtx = &htx;
10337
10338            if (target_to_host_timex(phtx, arg2) != 0) {
10339                return -TARGET_EFAULT;
10340            }
10341            ret = get_errno(clock_adjtime(arg1, phtx));
10342            if (!is_error(ret) && phtx) {
10343                if (host_to_target_timex(arg2, phtx) != 0) {
10344                    return -TARGET_EFAULT;
10345                }
10346            }
10347        }
10348        return ret;
10349#endif
10350#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10351    case TARGET_NR_clock_adjtime64:
10352        {
10353            struct timex htx;
10354
10355            if (target_to_host_timex64(&htx, arg2) != 0) {
10356                return -TARGET_EFAULT;
10357            }
10358            ret = get_errno(clock_adjtime(arg1, &htx));
10359            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10360                    return -TARGET_EFAULT;
10361            }
10362        }
10363        return ret;
10364#endif
10365    case TARGET_NR_getpgid:
10366        return get_errno(getpgid(arg1));
10367    case TARGET_NR_fchdir:
10368        return get_errno(fchdir(arg1));
10369    case TARGET_NR_personality:
10370        return get_errno(personality(arg1));
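             /*
              * _llseek: the guest passes the 64-bit offset as two 32-bit
              * halves (arg2 = high, arg3 = low) plus a pointer (arg4) for
              * the 64-bit result.  On hosts without __NR_llseek the halves
              * are recombined and a plain lseek() is used instead.
              */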
10371#ifdef TARGET_NR__llseek /* Not on alpha */
10372    case TARGET_NR__llseek:
10373        {
10374            int64_t res;
10375#if !defined(__NR_llseek)
10376            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10377            if (res == -1) {
10378                ret = get_errno(res);
10379            } else {
10380                ret = 0;
10381            }
10382#else
10383            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10384#endif
10385            if ((ret == 0) && put_user_s64(res, arg4)) {
10386                return -TARGET_EFAULT;
10387            }
10388        }
10389        return ret;
10390#endif
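             /*
              * getdents is handled one of three ways below: by repacking
              * host linux_dirent records when a 64-bit host serves a 32-bit
              * target ABI, by byte-swapping the records in place when the
              * layouts already match, or, when the host lacks a getdents
              * syscall, by shrinking getdents64 records into the target
              * dirent format in place.
              */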
10391#ifdef TARGET_NR_getdents
10392    case TARGET_NR_getdents:
10393#ifdef EMULATE_GETDENTS_WITH_GETDENTS
10394#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10395        {
10396            struct target_dirent *target_dirp;
10397            struct linux_dirent *dirp;
10398            abi_long count = arg3;
10399
10400            dirp = g_try_malloc(count);
10401            if (!dirp) {
10402                return -TARGET_ENOMEM;
10403            }
10404
10405            ret = get_errno(sys_getdents(arg1, dirp, count));
10406            if (!is_error(ret)) {
10407                struct linux_dirent *de;
10408                struct target_dirent *tde;
10409                int len = ret;
10410                int reclen, treclen;
10411                int count1, tnamelen;
10412
10413                count1 = 0;
10414                de = dirp;
10415                target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10416                if (!target_dirp) { g_free(dirp); return -TARGET_EFAULT; }
10417                tde = target_dirp;
10418                while (len > 0) {
10419                    reclen = de->d_reclen;
10420                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10421                    assert(tnamelen >= 0);
10422                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
10423                    assert(count1 + treclen <= count);
10424                    tde->d_reclen = tswap16(treclen);
10425                    tde->d_ino = tswapal(de->d_ino);
10426                    tde->d_off = tswapal(de->d_off);
10427                    memcpy(tde->d_name, de->d_name, tnamelen);
10428                    de = (struct linux_dirent *)((char *)de + reclen);
10429                    len -= reclen;
10430                    tde = (struct target_dirent *)((char *)tde + treclen);
10431                    count1 += treclen;
10432                }
10433                ret = count1;
10434                unlock_user(target_dirp, arg2, ret);
10435            }
10436            g_free(dirp);
10437        }
10438#else
10439        {
10440            struct linux_dirent *dirp;
10441            abi_long count = arg3;
10442
10443            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10444                return -TARGET_EFAULT;
10445            ret = get_errno(sys_getdents(arg1, dirp, count));
10446            if (!is_error(ret)) {
10447                struct linux_dirent *de;
10448                int len = ret;
10449                int reclen;
10450                de = dirp;
10451                while (len > 0) {
10452                    reclen = de->d_reclen;
10453                    if (reclen > len)
10454                        break;
10455                    de->d_reclen = tswap16(reclen);
10456                    tswapls(&de->d_ino);
10457                    tswapls(&de->d_off);
10458                    de = (struct linux_dirent *)((char *)de + reclen);
10459                    len -= reclen;
10460                }
10461            }
10462            unlock_user(dirp, arg2, ret);
10463        }
10464#endif
10465#else
10466        /* Implement getdents in terms of getdents64 */
10467        {
10468            struct linux_dirent64 *dirp;
10469            abi_long count = arg3;
10470
10471            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10472            if (!dirp) {
10473                return -TARGET_EFAULT;
10474            }
10475            ret = get_errno(sys_getdents64(arg1, dirp, count));
10476            if (!is_error(ret)) {
10477                /* Convert the dirent64 structs to target dirent.  We do this
10478                 * in-place, since we can guarantee that a target_dirent is no
10479                 * larger than a dirent64; however this means we have to be
10480                 * careful to read everything before writing in the new format.
10481                 */
10482                struct linux_dirent64 *de;
10483                struct target_dirent *tde;
10484                int len = ret;
10485                int tlen = 0;
10486
10487                de = dirp;
10488                tde = (struct target_dirent *)dirp;
10489                while (len > 0) {
10490                    int namelen, treclen;
10491                    int reclen = de->d_reclen;
10492                    uint64_t ino = de->d_ino;
10493                    int64_t off = de->d_off;
10494                    uint8_t type = de->d_type;
10495
10496                    namelen = strlen(de->d_name);
10497                    treclen = offsetof(struct target_dirent, d_name)
10498                        + namelen + 2;
10499                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10500
10501                    memmove(tde->d_name, de->d_name, namelen + 1);
10502                    tde->d_ino = tswapal(ino);
10503                    tde->d_off = tswapal(off);
10504                    tde->d_reclen = tswap16(treclen);
10505                    /* The target_dirent type is in what was formerly a padding
10506                     * byte at the end of the structure:
10507                     */
10508                    *(((char *)tde) + treclen - 1) = type;
10509
10510                    de = (struct linux_dirent64 *)((char *)de + reclen);
10511                    tde = (struct target_dirent *)((char *)tde + treclen);
10512                    len -= reclen;
10513                    tlen += treclen;
10514                }
10515                ret = tlen;
10516            }
10517            unlock_user(dirp, arg2, ret);
10518        }
10519#endif
10520        return ret;
10521#endif /* TARGET_NR_getdents */
10522#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10523    case TARGET_NR_getdents64:
10524        {
10525            struct linux_dirent64 *dirp;
10526            abi_long count = arg3;
10527            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10528                return -TARGET_EFAULT;
10529            ret = get_errno(sys_getdents64(arg1, dirp, count));
10530            if (!is_error(ret)) {
10531                struct linux_dirent64 *de;
10532                int len = ret;
10533                int reclen;
10534                de = dirp;
10535                while (len > 0) {
10536                    reclen = de->d_reclen;
10537                    if (reclen > len)
10538                        break;
10539                    de->d_reclen = tswap16(reclen);
10540                    tswap64s((uint64_t *)&de->d_ino);
10541                    tswap64s((uint64_t *)&de->d_off);
10542                    de = (struct linux_dirent64 *)((char *)de + reclen);
10543                    len -= reclen;
10544                }
10545            }
10546            unlock_user(dirp, arg2, ret);
10547        }
10548        return ret;
10549#endif /* TARGET_NR_getdents64 */
10550#if defined(TARGET_NR__newselect)
10551    case TARGET_NR__newselect:
10552        return do_select(arg1, arg2, arg3, arg4, arg5);
10553#endif
10554#ifdef TARGET_NR_poll
10555    case TARGET_NR_poll:
10556        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10557#endif
10558#ifdef TARGET_NR_ppoll
10559    case TARGET_NR_ppoll:
10560        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10561#endif
10562#ifdef TARGET_NR_ppoll_time64
10563    case TARGET_NR_ppoll_time64:
10564        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10565#endif
10566    case TARGET_NR_flock:
10567        /* NOTE: the flock constants seem to be the same on every
10568           Linux platform */
10569        return get_errno(safe_flock(arg1, arg2));
10570    case TARGET_NR_readv:
10571        {
10572            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10573            if (vec != NULL) {
10574                ret = get_errno(safe_readv(arg1, vec, arg3));
10575                unlock_iovec(vec, arg2, arg3, 1);
10576            } else {
10577                ret = -host_to_target_errno(errno);
10578            }
10579        }
10580        return ret;
10581    case TARGET_NR_writev:
10582        {
10583            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10584            if (vec != NULL) {
10585                ret = get_errno(safe_writev(arg1, vec, arg3));
10586                unlock_iovec(vec, arg2, arg3, 0);
10587            } else {
10588                ret = -host_to_target_errno(errno);
10589            }
10590        }
10591        return ret;
10592#if defined(TARGET_NR_preadv)
10593    case TARGET_NR_preadv:
10594        {
10595            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10596            if (vec != NULL) {
10597                unsigned long low, high;
10598
10599                target_to_host_low_high(arg4, arg5, &low, &high);
10600                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10601                unlock_iovec(vec, arg2, arg3, 1);
10602            } else {
10603                ret = -host_to_target_errno(errno);
10604            }
10605        }
10606        return ret;
10607#endif
10608#if defined(TARGET_NR_pwritev)
10609    case TARGET_NR_pwritev:
10610        {
10611            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10612            if (vec != NULL) {
10613                unsigned long low, high;
10614
10615                target_to_host_low_high(arg4, arg5, &low, &high);
10616                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10617                unlock_iovec(vec, arg2, arg3, 0);
10618            } else {
10619                ret = -host_to_target_errno(errno);
10620            }
10621        }
10622        return ret;
10623#endif
10624    case TARGET_NR_getsid:
10625        return get_errno(getsid(arg1));
10626#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10627    case TARGET_NR_fdatasync:
10628        return get_errno(fdatasync(arg1));
10629#endif
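             /*
              * Worked example for the affinity calls below: a 32-bit guest
              * passing a 4-byte cpu mask to a 64-bit host has mask_size
              * rounded up to 8 (sizeof(unsigned long)); the host/target
              * helpers then copy back at most the bytes the guest asked for.
              */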
10630    case TARGET_NR_sched_getaffinity:
10631        {
10632            unsigned int mask_size;
10633            unsigned long *mask;
10634
10635            /*
10636             * sched_getaffinity needs multiples of ulong, so we need to take
10637             * care of mismatches between target ulong and host ulong sizes.
10638             */
10639            if (arg2 & (sizeof(abi_ulong) - 1)) {
10640                return -TARGET_EINVAL;
10641            }
10642            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10643
10644            mask = alloca(mask_size);
10645            memset(mask, 0, mask_size);
10646            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10647
10648            if (!is_error(ret)) {
10649                if (ret > arg2) {
10650                    /* More data returned than the caller's buffer will fit.
10651                     * This only happens if sizeof(abi_long) < sizeof(long)
10652                     * and the caller passed us a buffer holding an odd number
10653                     * of abi_longs. If the host kernel is actually using the
10654                     * extra 4 bytes then fail EINVAL; otherwise we can just
10655                     * ignore them and only copy the interesting part.
10656                     */
10657                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10658                    if (numcpus > arg2 * 8) {
10659                        return -TARGET_EINVAL;
10660                    }
10661                    ret = arg2;
10662                }
10663
10664                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10665                    return -TARGET_EFAULT;
10666                }
10667            }
10668        }
10669        return ret;
10670    case TARGET_NR_sched_setaffinity:
10671        {
10672            unsigned int mask_size;
10673            unsigned long *mask;
10674
10675            /*
10676             * sched_setaffinity needs multiples of ulong, so we need to take
10677             * care of mismatches between target ulong and host ulong sizes.
10678             */
10679            if (arg2 & (sizeof(abi_ulong) - 1)) {
10680                return -TARGET_EINVAL;
10681            }
10682            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10683            mask = alloca(mask_size);
10684
10685            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10686            if (ret) {
10687                return ret;
10688            }
10689
10690            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10691        }
10692    case TARGET_NR_getcpu:
10693        {
10694            unsigned cpu, node;
10695            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10696                                       arg2 ? &node : NULL,
10697                                       NULL));
10698            if (is_error(ret)) {
10699                return ret;
10700            }
10701            if (arg1 && put_user_u32(cpu, arg1)) {
10702                return -TARGET_EFAULT;
10703            }
10704            if (arg2 && put_user_u32(node, arg2)) {
10705                return -TARGET_EFAULT;
10706            }
10707        }
10708        return ret;
10709    case TARGET_NR_sched_setparam:
10710        {
10711            struct sched_param *target_schp;
10712            struct sched_param schp;
10713
10714            if (arg2 == 0) {
10715                return -TARGET_EINVAL;
10716            }
10717            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10718                return -TARGET_EFAULT;
10719            schp.sched_priority = tswap32(target_schp->sched_priority);
10720            unlock_user_struct(target_schp, arg2, 0);
10721            return get_errno(sched_setparam(arg1, &schp));
10722        }
10723    case TARGET_NR_sched_getparam:
10724        {
10725            struct sched_param *target_schp;
10726            struct sched_param schp;
10727
10728            if (arg2 == 0) {
10729                return -TARGET_EINVAL;
10730            }
10731            ret = get_errno(sched_getparam(arg1, &schp));
10732            if (!is_error(ret)) {
10733                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10734                    return -TARGET_EFAULT;
10735                target_schp->sched_priority = tswap32(schp.sched_priority);
10736                unlock_user_struct(target_schp, arg2, 1);
10737            }
10738        }
10739        return ret;
10740    case TARGET_NR_sched_setscheduler:
10741        {
10742            struct sched_param *target_schp;
10743            struct sched_param schp;
10744            if (arg3 == 0) {
10745                return -TARGET_EINVAL;
10746            }
10747            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10748                return -TARGET_EFAULT;
10749            schp.sched_priority = tswap32(target_schp->sched_priority);
10750            unlock_user_struct(target_schp, arg3, 0);
10751            return get_errno(sched_setscheduler(arg1, arg2, &schp));
10752        }
10753    case TARGET_NR_sched_getscheduler:
10754        return get_errno(sched_getscheduler(arg1));
10755    case TARGET_NR_sched_yield:
10756        return get_errno(sched_yield());
10757    case TARGET_NR_sched_get_priority_max:
10758        return get_errno(sched_get_priority_max(arg1));
10759    case TARGET_NR_sched_get_priority_min:
10760        return get_errno(sched_get_priority_min(arg1));
10761#ifdef TARGET_NR_sched_rr_get_interval
10762    case TARGET_NR_sched_rr_get_interval:
10763        {
10764            struct timespec ts;
10765            ret = get_errno(sched_rr_get_interval(arg1, &ts));
10766            if (!is_error(ret)) {
10767                ret = host_to_target_timespec(arg2, &ts);
10768            }
10769        }
10770        return ret;
10771#endif
10772#ifdef TARGET_NR_sched_rr_get_interval_time64
10773    case TARGET_NR_sched_rr_get_interval_time64:
10774        {
10775            struct timespec ts;
10776            ret = get_errno(sched_rr_get_interval(arg1, &ts));
10777            if (!is_error(ret)) {
10778                ret = host_to_target_timespec64(arg2, &ts);
10779            }
10780        }
10781        return ret;
10782#endif
10783#if defined(TARGET_NR_nanosleep)
10784    case TARGET_NR_nanosleep:
10785        {
10786            struct timespec req, rem;
10787            if (target_to_host_timespec(&req, arg1)) {
                     return -TARGET_EFAULT;
                 }
10788            ret = get_errno(safe_nanosleep(&req, &rem));
10789            if (is_error(ret) && arg2) {
10790                host_to_target_timespec(arg2, &rem);
10791            }
10792        }
10793        return ret;
10794#endif
10795    case TARGET_NR_prctl:
10796        switch (arg1) {
10797        case PR_GET_PDEATHSIG:
10798        {
10799            int deathsig;
10800            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10801            if (!is_error(ret) && arg2
10802                && put_user_s32(deathsig, arg2)) {
10803                return -TARGET_EFAULT;
10804            }
10805            return ret;
10806        }
10807#ifdef PR_GET_NAME
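             /* PR_GET_NAME and PR_SET_NAME operate on a fixed 16-byte
              * (TASK_COMM_LEN) buffer. */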
10808        case PR_GET_NAME:
10809        {
10810            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10811            if (!name) {
10812                return -TARGET_EFAULT;
10813            }
10814            ret = get_errno(prctl(arg1, (unsigned long)name,
10815                                  arg3, arg4, arg5));
10816            unlock_user(name, arg2, 16);
10817            return ret;
10818        }
10819        case PR_SET_NAME:
10820        {
10821            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10822            if (!name) {
10823                return -TARGET_EFAULT;
10824            }
10825            ret = get_errno(prctl(arg1, (unsigned long)name,
10826                                  arg3, arg4, arg5));
10827            unlock_user(name, arg2, 0);
10828            return ret;
10829        }
10830#endif
10831#ifdef TARGET_MIPS
10832        case TARGET_PR_GET_FP_MODE:
10833        {
10834            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10835            ret = 0;
10836            if (env->CP0_Status & (1 << CP0St_FR)) {
10837                ret |= TARGET_PR_FP_MODE_FR;
10838            }
10839            if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10840                ret |= TARGET_PR_FP_MODE_FRE;
10841            }
10842            return ret;
10843        }
10844        case TARGET_PR_SET_FP_MODE:
10845        {
10846            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10847            bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10848            bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10849            bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10850            bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10851
10852            const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10853                                            TARGET_PR_FP_MODE_FRE;
10854
10855            /* If nothing to change, return right away, successfully.  */
10856            if (old_fr == new_fr && old_fre == new_fre) {
10857                return 0;
10858            }
10859            /* Check the value is valid */
10860            if (arg2 & ~known_bits) {
10861                return -TARGET_EOPNOTSUPP;
10862            }
10863            /* Setting FRE without FR is not supported.  */
10864            if (new_fre && !new_fr) {
10865                return -TARGET_EOPNOTSUPP;
10866            }
10867            if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10868                /* FR1 is not supported */
10869                return -TARGET_EOPNOTSUPP;
10870            }
10871            if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10872                && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10873                /* cannot set FR=0 */
10874                return -TARGET_EOPNOTSUPP;
10875            }
10876            if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10877                /* Cannot set FRE=1 */
10878                return -TARGET_EOPNOTSUPP;
10879            }
10880
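                 /*
                  * Toggling FR changes how the odd-numbered single-precision
                  * registers map onto the FPRs: with FR=0 the odd single
                  * aliases the upper half of the preceding even-numbered
                  * double.  Move that word between fpr[i + 1] and the high
                  * word of fpr[i] so register contents survive the switch.
                  */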
10881            int i;
10882            fpr_t *fpr = env->active_fpu.fpr;
10883            for (i = 0; i < 32 ; i += 2) {
10884                if (!old_fr && new_fr) {
10885                    fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10886                } else if (old_fr && !new_fr) {
10887                    fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10888                }
10889            }
10890
10891            if (new_fr) {
10892                env->CP0_Status |= (1 << CP0St_FR);
10893                env->hflags |= MIPS_HFLAG_F64;
10894            } else {
10895                env->CP0_Status &= ~(1 << CP0St_FR);
10896                env->hflags &= ~MIPS_HFLAG_F64;
10897            }
10898            if (new_fre) {
10899                env->CP0_Config5 |= (1 << CP0C5_FRE);
10900                if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10901                    env->hflags |= MIPS_HFLAG_FRE;
10902                }
10903            } else {
10904                env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10905                env->hflags &= ~MIPS_HFLAG_FRE;
10906            }
10907
10908            return 0;
10909        }
10910#endif /* MIPS */
10911#ifdef TARGET_AARCH64
10912        case TARGET_PR_SVE_SET_VL:
10913            /*
10914             * We cannot support either PR_SVE_SET_VL_ONEXEC or
10915             * PR_SVE_VL_INHERIT.  Note the kernel definition
10916             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10917             * even though the current architectural maximum is VQ=16.
10918             */
10919            ret = -TARGET_EINVAL;
10920            if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10921                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10922                CPUARMState *env = cpu_env;
10923                ARMCPU *cpu = env_archcpu(env);
10924                uint32_t vq, old_vq;
10925
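                     /*
                      * arg2 is the requested vector length in bytes; ZCR_EL1.LEN
                      * holds VQ - 1 in units of 128 bits, so convert and clamp
                      * to this CPU's maximum supported VQ.
                      */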
10926                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10927                vq = MAX(arg2 / 16, 1);
10928                vq = MIN(vq, cpu->sve_max_vq);
10929
10930                if (vq < old_vq) {
10931                    aarch64_sve_narrow_vq(env, vq);
10932                }
10933                env->vfp.zcr_el[1] = vq - 1;
10934                arm_rebuild_hflags(env);
10935                ret = vq * 16;
10936            }
10937            return ret;
10938        case TARGET_PR_SVE_GET_VL:
10939            ret = -TARGET_EINVAL;
10940            {
10941                ARMCPU *cpu = env_archcpu(cpu_env);
10942                if (cpu_isar_feature(aa64_sve, cpu)) {
10943                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10944                }
10945            }
10946            return ret;
10947        case TARGET_PR_PAC_RESET_KEYS:
10948            {
10949                CPUARMState *env = cpu_env;
10950                ARMCPU *cpu = env_archcpu(env);
10951
10952                if (arg3 || arg4 || arg5) {
10953                    return -TARGET_EINVAL;
10954                }
10955                if (cpu_isar_feature(aa64_pauth, cpu)) {
10956                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10957                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10958                               TARGET_PR_PAC_APGAKEY);
10959                    int ret = 0;
10960                    Error *err = NULL;
10961
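                         /* As in the kernel's PR_PAC_RESET_KEYS handling, an
                          * arg2 of 0 selects all of the keys. */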
10962                    if (arg2 == 0) {
10963                        arg2 = all;
10964                    } else if (arg2 & ~all) {
10965                        return -TARGET_EINVAL;
10966                    }
10967                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
10968                        ret |= qemu_guest_getrandom(&env->keys.apia,
10969                                                    sizeof(ARMPACKey), &err);
10970                    }
10971                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
10972                        ret |= qemu_guest_getrandom(&env->keys.apib,
10973                                                    sizeof(ARMPACKey), &err);
10974                    }
10975                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
10976                        ret |= qemu_guest_getrandom(&env->keys.apda,
10977                                                    sizeof(ARMPACKey), &err);
10978                    }
10979                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
10980                        ret |= qemu_guest_getrandom(&env->keys.apdb,
10981                                                    sizeof(ARMPACKey), &err);
10982                    }
10983                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
10984                        ret |= qemu_guest_getrandom(&env->keys.apga,
10985                                                    sizeof(ARMPACKey), &err);
10986                    }
10987                    if (ret != 0) {
10988                        /*
10989                         * Some unknown failure in the crypto.  The best
10990                         * we can do is log it and fail the syscall.
10991                         * The real syscall cannot fail this way.
10992                         */
10993                        qemu_log_mask(LOG_UNIMP,
10994                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
10995                                      error_get_pretty(err));
10996                        error_free(err);
10997                        return -TARGET_EIO;
10998                    }
10999                    return 0;
11000                }
11001            }
11002            return -TARGET_EINVAL;
11003        case TARGET_PR_SET_TAGGED_ADDR_CTRL:
11004            {
11005                abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
11006                CPUARMState *env = cpu_env;
11007                ARMCPU *cpu = env_archcpu(env);
11008
11009                if (cpu_isar_feature(aa64_mte, cpu)) {
11010                    valid_mask |= TARGET_PR_MTE_TCF_MASK;
11011                    valid_mask |= TARGET_PR_MTE_TAG_MASK;
11012                }
11013
11014                if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
11015                    return -TARGET_EINVAL;
11016                }
11017                env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
11018
11019                if (cpu_isar_feature(aa64_mte, cpu)) {
11020                    switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
11021                    case TARGET_PR_MTE_TCF_NONE:
11022                    case TARGET_PR_MTE_TCF_SYNC:
11023                    case TARGET_PR_MTE_TCF_ASYNC:
11024                        break;
11025                    default:
11026                        return -TARGET_EINVAL;
11027                    }
11028
11029                    /*
11030                     * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
11031                     * Note that the syscall values are consistent with hw.
11032                     */
11033                    env->cp15.sctlr_el[1] =
11034                        deposit64(env->cp15.sctlr_el[1], 38, 2,
11035                                  arg2 >> TARGET_PR_MTE_TCF_SHIFT);
11036
11037                    /*
11038                     * Write PR_MTE_TAG to GCR_EL1[Exclude].
11039                     * Note that the syscall uses an include mask,
11040                     * and hardware uses an exclude mask -- invert.
11041                     */
11042                    env->cp15.gcr_el1 =
11043                        deposit64(env->cp15.gcr_el1, 0, 16,
11044                                  ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
11045                    arm_rebuild_hflags(env);
11046                }
11047                return 0;
11048            }
11049        case TARGET_PR_GET_TAGGED_ADDR_CTRL:
11050            {
11051                abi_long ret = 0;
11052                CPUARMState *env = cpu_env;
11053                ARMCPU *cpu = env_archcpu(env);
11054
11055                if (arg2 || arg3 || arg4 || arg5) {
11056                    return -TARGET_EINVAL;
11057                }
11058                if (env->tagged_addr_enable) {
11059                    ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
11060                }
11061                if (cpu_isar_feature(aa64_mte, cpu)) {
11062                    /* See above. */
11063                    ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
11064                            << TARGET_PR_MTE_TCF_SHIFT);
11065                    ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
11066                                    ~env->cp15.gcr_el1);
11067                }
11068                return ret;
11069            }
11070#endif /* AARCH64 */
11071        case PR_GET_SECCOMP:
11072        case PR_SET_SECCOMP:
11073            /* Disable seccomp to prevent the target disabling syscalls we
11074             * need. */
11075            return -TARGET_EINVAL;
11076        default:
11077            /* Most prctl options have no pointer arguments */
11078            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
11079        }
11080        break;
11081#ifdef TARGET_NR_arch_prctl
11082    case TARGET_NR_arch_prctl:
11083        return do_arch_prctl(cpu_env, arg1, arg2);
11084#endif
11085#ifdef TARGET_NR_pread64
11086    case TARGET_NR_pread64:
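             /* On ABIs that pass 64-bit syscall arguments in aligned register
              * pairs, the offset halves arrive one argument slot later. */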
11087        if (regpairs_aligned(cpu_env, num)) {
11088            arg4 = arg5;
11089            arg5 = arg6;
11090        }
11091        if (arg2 == 0 && arg3 == 0) {
11092            /* Special-case NULL buffer and zero length, which should succeed */
11093            p = 0;
11094        } else {
11095            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11096            if (!p) {
11097                return -TARGET_EFAULT;
11098            }
11099        }
11100        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11101        unlock_user(p, arg2, ret);
11102        return ret;
11103    case TARGET_NR_pwrite64:
11104        if (regpairs_aligned(cpu_env, num)) {
11105            arg4 = arg5;
11106            arg5 = arg6;
11107        }
11108        if (arg2 == 0 && arg3 == 0) {
11109            /* Special-case NULL buffer and zero length, which should succeed */
11110            p = 0;
11111        } else {
11112            p = lock_user(VERIFY_READ, arg2, arg3, 1);
11113            if (!p) {
11114                return -TARGET_EFAULT;
11115            }
11116        }
11117        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11118        unlock_user(p, arg2, 0);
11119        return ret;
11120#endif
11121    case TARGET_NR_getcwd:
11122        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11123            return -TARGET_EFAULT;
11124        ret = get_errno(sys_getcwd1(p, arg2));
11125        unlock_user(p, arg1, ret);
11126        return ret;
11127    case TARGET_NR_capget:
11128    case TARGET_NR_capset:
11129    {
11130        struct target_user_cap_header *target_header;
11131        struct target_user_cap_data *target_data = NULL;
11132        struct __user_cap_header_struct header;
11133        struct __user_cap_data_struct data[2];
11134        struct __user_cap_data_struct *dataptr = NULL;
11135        int i, target_datalen;
11136        int data_items = 1;
11137
11138        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11139            return -TARGET_EFAULT;
11140        }
11141        header.version = tswap32(target_header->version);
11142        header.pid = tswap32(target_header->pid);
11143
11144        if (header.version != _LINUX_CAPABILITY_VERSION) {
11145            /* Version 2 and up takes pointer to two user_data structs */
11146            data_items = 2;
11147        }
11148
11149        target_datalen = sizeof(*target_data) * data_items;
11150
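             /* A NULL data pointer is legal for capget/capset (e.g. when the
              * guest is only probing the preferred capability version), so the
              * data array is locked and converted only when one was supplied. */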
11151        if (arg2) {
11152            if (num == TARGET_NR_capget) {
11153                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11154            } else {
11155                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11156            }
11157            if (!target_data) {
11158                unlock_user_struct(target_header, arg1, 0);
11159                return -TARGET_EFAULT;
11160            }
11161
11162            if (num == TARGET_NR_capset) {
11163                for (i = 0; i < data_items; i++) {
11164                    data[i].effective = tswap32(target_data[i].effective);
11165                    data[i].permitted = tswap32(target_data[i].permitted);
11166                    data[i].inheritable = tswap32(target_data[i].inheritable);
11167                }
11168            }
11169
11170            dataptr = data;
11171        }
11172
11173        if (num == TARGET_NR_capget) {
11174            ret = get_errno(capget(&header, dataptr));
11175        } else {
11176            ret = get_errno(capset(&header, dataptr));
11177        }
11178
11179        /* The kernel always updates version for both capget and capset */
11180        target_header->version = tswap32(header.version);
11181        unlock_user_struct(target_header, arg1, 1);
11182
11183        if (arg2) {
11184            if (num == TARGET_NR_capget) {
11185                for (i = 0; i < data_items; i++) {
11186                    target_data[i].effective = tswap32(data[i].effective);
11187                    target_data[i].permitted = tswap32(data[i].permitted);
11188                    target_data[i].inheritable = tswap32(data[i].inheritable);
11189                }
11190                unlock_user(target_data, arg2, target_datalen);
11191            } else {
11192                unlock_user(target_data, arg2, 0);
11193            }
11194        }
11195        return ret;
11196    }
11197    case TARGET_NR_sigaltstack:
11198        return do_sigaltstack(arg1, arg2,
11199                              get_sp_from_cpustate((CPUArchState *)cpu_env));
11200
11201#ifdef CONFIG_SENDFILE
11202#ifdef TARGET_NR_sendfile
11203    case TARGET_NR_sendfile:
11204    {
11205        off_t *offp = NULL;
11206        off_t off;
11207        if (arg3) {
11208            ret = get_user_sal(off, arg3);
11209            if (is_error(ret)) {
11210                return ret;
11211            }
11212            offp = &off;
11213        }
11214        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11215        if (!is_error(ret) && arg3) {
11216            abi_long ret2 = put_user_sal(off, arg3);
11217            if (is_error(ret2)) {
11218                ret = ret2;
11219            }
11220        }
11221        return ret;
11222    }
11223#endif
11224#ifdef TARGET_NR_sendfile64
11225    case TARGET_NR_sendfile64:
11226    {
11227        off_t *offp = NULL;
11228        off_t off;
11229        if (arg3) {
11230            ret = get_user_s64(off, arg3);
11231            if (is_error(ret)) {
11232                return ret;
11233            }
11234            offp = &off;
11235        }
11236        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11237        if (!is_error(ret) && arg3) {
11238            abi_long ret2 = put_user_s64(off, arg3);
11239            if (is_error(ret2)) {
11240                ret = ret2;
11241            }
11242        }
11243        return ret;
11244    }
11245#endif
11246#endif
11247#ifdef TARGET_NR_vfork
11248    case TARGET_NR_vfork:
11249        return get_errno(do_fork(cpu_env,
11250                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11251                         0, 0, 0, 0));
11252#endif
11253#ifdef TARGET_NR_ugetrlimit
11254    case TARGET_NR_ugetrlimit:
11255    {
11256        struct rlimit rlim;
11257        int resource = target_to_host_resource(arg1);
11258        ret = get_errno(getrlimit(resource, &rlim));
11259        if (!is_error(ret)) {
11260            struct target_rlimit *target_rlim;
11261            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11262                return -TARGET_EFAULT;
11263            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11264            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11265            unlock_user_struct(target_rlim, arg2, 1);
11266        }
11267        return ret;
11268    }
11269#endif
11270#ifdef TARGET_NR_truncate64
11271    case TARGET_NR_truncate64:
11272        if (!(p = lock_user_string(arg1)))
11273            return -TARGET_EFAULT;
11274        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11275        unlock_user(p, arg1, 0);
11276        return ret;
11277#endif
11278#ifdef TARGET_NR_ftruncate64
11279    case TARGET_NR_ftruncate64:
11280        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11281#endif
11282#ifdef TARGET_NR_stat64
11283    case TARGET_NR_stat64:
11284        if (!(p = lock_user_string(arg1))) {
11285            return -TARGET_EFAULT;
11286        }
11287        ret = get_errno(stat(path(p), &st));
11288        unlock_user(p, arg1, 0);
11289        if (!is_error(ret))
11290            ret = host_to_target_stat64(cpu_env, arg2, &st);
11291        return ret;
11292#endif
11293#ifdef TARGET_NR_lstat64
11294    case TARGET_NR_lstat64:
11295        if (!(p = lock_user_string(arg1))) {
11296            return -TARGET_EFAULT;
11297        }
11298        ret = get_errno(lstat(path(p), &st));
11299        unlock_user(p, arg1, 0);
11300        if (!is_error(ret))
11301            ret = host_to_target_stat64(cpu_env, arg2, &st);
11302        return ret;
11303#endif
11304#ifdef TARGET_NR_fstat64
11305    case TARGET_NR_fstat64:
11306        ret = get_errno(fstat(arg1, &st));
11307        if (!is_error(ret))
11308            ret = host_to_target_stat64(cpu_env, arg2, &st);
11309        return ret;
11310#endif
11311#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11312#ifdef TARGET_NR_fstatat64
11313    case TARGET_NR_fstatat64:
11314#endif
11315#ifdef TARGET_NR_newfstatat
11316    case TARGET_NR_newfstatat:
11317#endif
11318        if (!(p = lock_user_string(arg2))) {
11319            return -TARGET_EFAULT;
11320        }
11321        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11322        unlock_user(p, arg2, 0);
11323        if (!is_error(ret))
11324            ret = host_to_target_stat64(cpu_env, arg3, &st);
11325        return ret;
11326#endif
11327#if defined(TARGET_NR_statx)
11328    case TARGET_NR_statx:
11329        {
11330            struct target_statx *target_stx;
11331            int dirfd = arg1;
11332            int flags = arg3;
11333
11334            p = lock_user_string(arg2);
11335            if (p == NULL) {
11336                return -TARGET_EFAULT;
11337            }
11338#if defined(__NR_statx)
11339            {
11340                /*
11341                 * It is assumed that struct statx is architecture independent.
11342                 */
11343                struct target_statx host_stx;
11344                int mask = arg4;
11345
11346                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11347                if (!is_error(ret)) {
11348                    if (host_to_target_statx(&host_stx, arg5) != 0) {
11349                        unlock_user(p, arg2, 0);
11350                        return -TARGET_EFAULT;
11351                    }
11352                }
11353
11354                if (ret != -TARGET_ENOSYS) {
11355                    unlock_user(p, arg2, 0);
11356                    return ret;
11357                }
11358            }
11359#endif
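                 /* Host statx() is unavailable or returned ENOSYS: emulate it
                  * with fstatat() and fill in only the stx fields that a
                  * struct stat can provide. */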
11360            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11361            unlock_user(p, arg2, 0);
11362
11363            if (!is_error(ret)) {
11364                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11365                    return -TARGET_EFAULT;
11366                }
11367                memset(target_stx, 0, sizeof(*target_stx));
11368                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11369                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11370                __put_user(st.st_ino, &target_stx->stx_ino);
11371                __put_user(st.st_mode, &target_stx->stx_mode);
11372                __put_user(st.st_uid, &target_stx->stx_uid);
11373                __put_user(st.st_gid, &target_stx->stx_gid);
11374                __put_user(st.st_nlink, &target_stx->stx_nlink);
11375                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11376                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11377                __put_user(st.st_size, &target_stx->stx_size);
11378                __put_user(st.st_blksize, &target_stx->stx_blksize);
11379                __put_user(st.st_blocks, &target_stx->stx_blocks);
11380                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11381                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11382                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11383                unlock_user_struct(target_stx, arg5, 1);
11384            }
11385        }
11386        return ret;
11387#endif
11388#ifdef TARGET_NR_lchown
11389    case TARGET_NR_lchown:
11390        if (!(p = lock_user_string(arg1)))
11391            return -TARGET_EFAULT;
11392        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11393        unlock_user(p, arg1, 0);
11394        return ret;
11395#endif
11396#ifdef TARGET_NR_getuid
11397    case TARGET_NR_getuid:
11398        return get_errno(high2lowuid(getuid()));
11399#endif
11400#ifdef TARGET_NR_getgid
11401    case TARGET_NR_getgid:
11402        return get_errno(high2lowgid(getgid()));
11403#endif
11404#ifdef TARGET_NR_geteuid
11405    case TARGET_NR_geteuid:
11406        return get_errno(high2lowuid(geteuid()));
11407#endif
11408#ifdef TARGET_NR_getegid
11409    case TARGET_NR_getegid:
11410        return get_errno(high2lowgid(getegid()));
11411#endif
11412    case TARGET_NR_setreuid:
11413        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11414    case TARGET_NR_setregid:
11415        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11416    case TARGET_NR_getgroups:
11417        {
11418            int gidsetsize = arg1;
11419            target_id *target_grouplist;
11420            gid_t *grouplist;
11421            int i;
11422
11423            grouplist = alloca(gidsetsize * sizeof(gid_t));
11424            ret = get_errno(getgroups(gidsetsize, grouplist));
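                 /* A gidsetsize of 0 only queries the number of supplementary
                  * groups, so there is nothing to copy back to the guest. */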
11425            if (gidsetsize == 0)
11426                return ret;
11427            if (!is_error(ret)) {
11428                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11429                if (!target_grouplist)
11430                    return -TARGET_EFAULT;
11431                for (i = 0; i < ret; i++)
11432                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11433                unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11434            }
11435        }
11436        return ret;
11437    case TARGET_NR_setgroups:
11438        {
11439            int gidsetsize = arg1;
11440            target_id *target_grouplist;
11441            gid_t *grouplist = NULL;
11442            int i;
11443            if (gidsetsize) {
11444                grouplist = alloca(gidsetsize * sizeof(gid_t));
11445                target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11446                if (!target_grouplist) {
11447                    return -TARGET_EFAULT;
11448                }
11449                for (i = 0; i < gidsetsize; i++) {
11450                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11451                }
11452                unlock_user(target_grouplist, arg2, 0);
11453            }
11454            return get_errno(setgroups(gidsetsize, grouplist));
11455        }
11456    case TARGET_NR_fchown:
11457        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11458#if defined(TARGET_NR_fchownat)
11459    case TARGET_NR_fchownat:
11460        if (!(p = lock_user_string(arg2)))
11461            return -TARGET_EFAULT;
11462        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11463                                 low2highgid(arg4), arg5));
11464        unlock_user(p, arg2, 0);
11465        return ret;
11466#endif
11467#ifdef TARGET_NR_setresuid
11468    case TARGET_NR_setresuid:
11469        return get_errno(sys_setresuid(low2highuid(arg1),
11470                                       low2highuid(arg2),
11471                                       low2highuid(arg3)));
11472#endif
11473#ifdef TARGET_NR_getresuid
11474    case TARGET_NR_getresuid:
11475        {
11476            uid_t ruid, euid, suid;
11477            ret = get_errno(getresuid(&ruid, &euid, &suid));
11478            if (!is_error(ret)) {
11479                if (put_user_id(high2lowuid(ruid), arg1)
11480                    || put_user_id(high2lowuid(euid), arg2)
11481                    || put_user_id(high2lowuid(suid), arg3))
11482                    return -TARGET_EFAULT;
11483            }
11484        }
11485        return ret;
11486#endif
11487#ifdef TARGET_NR_getresgid
11488    case TARGET_NR_setresgid:
11489        return get_errno(sys_setresgid(low2highgid(arg1),
11490                                       low2highgid(arg2),
11491                                       low2highgid(arg3)));
11492#endif
11493#ifdef TARGET_NR_getresgid
11494    case TARGET_NR_getresgid:
11495        {
11496            gid_t rgid, egid, sgid;
11497            ret = get_errno(getresgid(&rgid, &egid, &sgid));
11498            if (!is_error(ret)) {
11499                if (put_user_id(high2lowgid(rgid), arg1)
11500                    || put_user_id(high2lowgid(egid), arg2)
11501                    || put_user_id(high2lowgid(sgid), arg3))
11502                    return -TARGET_EFAULT;
11503            }
11504        }
11505        return ret;
11506#endif
11507#ifdef TARGET_NR_chown
11508    case TARGET_NR_chown:
11509        if (!(p = lock_user_string(arg1)))
11510            return -TARGET_EFAULT;
11511        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11512        unlock_user(p, arg1, 0);
11513        return ret;
11514#endif
11515    case TARGET_NR_setuid:
11516        return get_errno(sys_setuid(low2highuid(arg1)));
11517    case TARGET_NR_setgid:
11518        return get_errno(sys_setgid(low2highgid(arg1)));
11519    case TARGET_NR_setfsuid:
11520        return get_errno(setfsuid(arg1));
11521    case TARGET_NR_setfsgid:
11522        return get_errno(setfsgid(arg1));
11523
11524#ifdef TARGET_NR_lchown32
11525    case TARGET_NR_lchown32:
11526        if (!(p = lock_user_string(arg1)))
11527            return -TARGET_EFAULT;
11528        ret = get_errno(lchown(p, arg2, arg3));
11529        unlock_user(p, arg1, 0);
11530        return ret;
11531#endif
11532#ifdef TARGET_NR_getuid32
11533    case TARGET_NR_getuid32:
11534        return get_errno(getuid());
11535#endif
11536
11537#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11538    /* Alpha specific */
11539    case TARGET_NR_getxuid:
11540        {
11541            uid_t euid;
11542            euid = geteuid();
11543            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11544        }
11545        return get_errno(getuid());
11546#endif
11547#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11548    /* Alpha specific */
11549    case TARGET_NR_getxgid:
11550        {
11551            gid_t egid;
11552            egid = getegid();
11553            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11554        }
11555        return get_errno(getgid());
11556#endif
11557#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11558    /* Alpha specific */
11559    case TARGET_NR_osf_getsysinfo:
11560        ret = -TARGET_EOPNOTSUPP;
11561        switch (arg1) {
11562          case TARGET_GSI_IEEE_FP_CONTROL:
11563            {
11564                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11565                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11566
11567                swcr &= ~SWCR_STATUS_MASK;
11568                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11569
11570                if (put_user_u64(swcr, arg2))
11571                    return -TARGET_EFAULT;
11572                ret = 0;
11573            }
11574            break;
11575
11576          /* case GSI_IEEE_STATE_AT_SIGNAL:
11577             -- Not implemented in linux kernel.
11578             case GSI_UACPROC:
11579             -- Retrieves current unaligned access state; not much used.
11580             case GSI_PROC_TYPE:
11581             -- Retrieves implver information; surely not used.
11582             case GSI_GET_HWRPB:
11583             -- Grabs a copy of the HWRPB; surely not used.
11584          */
11585        }
11586        return ret;
11587#endif
11588#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11589    /* Alpha specific */
11590    case TARGET_NR_osf_setsysinfo:
11591        ret = -TARGET_EOPNOTSUPP;
11592        switch (arg1) {
11593          case TARGET_SSI_IEEE_FP_CONTROL:
11594            {
11595                uint64_t swcr, fpcr;
11596
11597                if (get_user_u64(swcr, arg2)) {
11598                    return -TARGET_EFAULT;
11599                }
11600
11601                /*
11602                 * The kernel calls swcr_update_status to update the
11603                 * status bits from the fpcr at every point that it
11604                 * could be queried.  Therefore, we store the status
11605                 * bits only in FPCR.
11606                 */
11607                ((CPUAlphaState *)cpu_env)->swcr
11608                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11609
11610                fpcr = cpu_alpha_load_fpcr(cpu_env);
11611                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11612                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11613                cpu_alpha_store_fpcr(cpu_env, fpcr);
11614                ret = 0;
11615            }
11616            break;
11617
11618          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11619            {
11620                uint64_t exc, fpcr, fex;
11621
11622                if (get_user_u64(exc, arg2)) {
11623                    return -TARGET_EFAULT;
11624                }
11625                exc &= SWCR_STATUS_MASK;
11626                fpcr = cpu_alpha_load_fpcr(cpu_env);
11627
11628                /* Old exceptions are not signaled.  */
11629                fex = alpha_ieee_fpcr_to_swcr(fpcr);
11630                fex = exc & ~fex;
11631                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11632                fex &= ((CPUArchState *)cpu_env)->swcr;
11633
11634                /* Update the hardware fpcr.  */
11635                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11636                cpu_alpha_store_fpcr(cpu_env, fpcr);
11637
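                     /* If any newly raised exception is also enabled in the
                      * software completion control word, deliver a SIGFPE with
                      * the most specific si_code we can derive from it. */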
11638                if (fex) {
11639                    int si_code = TARGET_FPE_FLTUNK;
11640                    target_siginfo_t info;
11641
11642                    if (fex & SWCR_TRAP_ENABLE_DNO) {
11643                        si_code = TARGET_FPE_FLTUND;
11644                    }
11645                    if (fex & SWCR_TRAP_ENABLE_INE) {
11646                        si_code = TARGET_FPE_FLTRES;
11647                    }
11648                    if (fex & SWCR_TRAP_ENABLE_UNF) {
11649                        si_code = TARGET_FPE_FLTUND;
11650                    }
11651                    if (fex & SWCR_TRAP_ENABLE_OVF) {
11652                        si_code = TARGET_FPE_FLTOVF;
11653                    }
11654                    if (fex & SWCR_TRAP_ENABLE_DZE) {
11655                        si_code = TARGET_FPE_FLTDIV;
11656                    }
11657                    if (fex & SWCR_TRAP_ENABLE_INV) {
11658                        si_code = TARGET_FPE_FLTINV;
11659                    }
11660
11661                    info.si_signo = SIGFPE;
11662                    info.si_errno = 0;
11663                    info.si_code = si_code;
11664                    info._sifields._sigfault._addr
11665                        = ((CPUArchState *)cpu_env)->pc;
11666                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
11667                                 QEMU_SI_FAULT, &info);
11668                }
11669                ret = 0;
11670            }
11671            break;
11672
11673          /* case SSI_NVPAIRS:
11674             -- Used with SSIN_UACPROC to enable unaligned accesses.
11675             case SSI_IEEE_STATE_AT_SIGNAL:
11676             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11677             -- Not implemented in linux kernel
11678          */
11679        }
11680        return ret;
11681#endif
11682#ifdef TARGET_NR_osf_sigprocmask
11683    /* Alpha specific.  */
11684    case TARGET_NR_osf_sigprocmask:
11685        {
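                 /* Unlike sigprocmask(2), the OSF/1 variant returns the old
                  * signal mask as the syscall result rather than through a
                  * user-memory pointer. */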
11686            abi_ulong mask;
11687            int how;
11688            sigset_t set, oldset;
11689
11690            switch(arg1) {
11691            case TARGET_SIG_BLOCK:
11692                how = SIG_BLOCK;
11693                break;
11694            case TARGET_SIG_UNBLOCK:
11695                how = SIG_UNBLOCK;
11696                break;
11697            case TARGET_SIG_SETMASK:
11698                how = SIG_SETMASK;
11699                break;
11700            default:
11701                return -TARGET_EINVAL;
11702            }
11703            mask = arg2;
11704            target_to_host_old_sigset(&set, &mask);
11705            ret = do_sigprocmask(how, &set, &oldset);
11706            if (!ret) {
11707                host_to_target_old_sigset(&mask, &oldset);
11708                ret = mask;
11709            }
11710        }
11711        return ret;
11712#endif
11713
11714#ifdef TARGET_NR_getgid32
11715    case TARGET_NR_getgid32:
11716        return get_errno(getgid());
11717#endif
11718#ifdef TARGET_NR_geteuid32
11719    case TARGET_NR_geteuid32:
11720        return get_errno(geteuid());
11721#endif
11722#ifdef TARGET_NR_getegid32
11723    case TARGET_NR_getegid32:
11724        return get_errno(getegid());
11725#endif
11726#ifdef TARGET_NR_setreuid32
11727    case TARGET_NR_setreuid32:
11728        return get_errno(setreuid(arg1, arg2));
11729#endif
11730#ifdef TARGET_NR_setregid32
11731    case TARGET_NR_setregid32:
11732        return get_errno(setregid(arg1, arg2));
11733#endif
11734#ifdef TARGET_NR_getgroups32
11735    case TARGET_NR_getgroups32:
11736        {
11737            int gidsetsize = arg1;
11738            uint32_t *target_grouplist;
11739            gid_t *grouplist;
11740            int i;
11741
11742            grouplist = alloca(gidsetsize * sizeof(gid_t));
11743            ret = get_errno(getgroups(gidsetsize, grouplist));
11744            if (gidsetsize == 0)
11745                return ret;
11746            if (!is_error(ret)) {
11747                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11748                if (!target_grouplist) {
11749                    return -TARGET_EFAULT;
11750                }
11751                for (i = 0; i < ret; i++)
11752                    target_grouplist[i] = tswap32(grouplist[i]);
11753                unlock_user(target_grouplist, arg2, gidsetsize * 4);
11754            }
11755        }
11756        return ret;
11757#endif
11758#ifdef TARGET_NR_setgroups32
11759    case TARGET_NR_setgroups32:
11760        {
11761            int gidsetsize = arg1;
11762            uint32_t *target_grouplist;
11763            gid_t *grouplist;
11764            int i;
11765
11766            grouplist = alloca(gidsetsize * sizeof(gid_t));
11767            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11768            if (!target_grouplist) {
11769                return -TARGET_EFAULT;
11770            }
11771                for (i = 0; i < gidsetsize; i++)
11772                grouplist[i] = tswap32(target_grouplist[i]);
11773            unlock_user(target_grouplist, arg2, 0);
11774            return get_errno(setgroups(gidsetsize, grouplist));
11775        }
11776#endif
11777#ifdef TARGET_NR_fchown32
11778    case TARGET_NR_fchown32:
11779        return get_errno(fchown(arg1, arg2, arg3));
11780#endif
11781#ifdef TARGET_NR_setresuid32
11782    case TARGET_NR_setresuid32:
11783        return get_errno(sys_setresuid(arg1, arg2, arg3));
11784#endif
11785#ifdef TARGET_NR_getresuid32
11786    case TARGET_NR_getresuid32:
11787        {
11788            uid_t ruid, euid, suid;
11789            ret = get_errno(getresuid(&ruid, &euid, &suid));
11790            if (!is_error(ret)) {
11791                if (put_user_u32(ruid, arg1)
11792                    || put_user_u32(euid, arg2)
11793                    || put_user_u32(suid, arg3))
11794                    return -TARGET_EFAULT;
11795            }
11796        }
11797        return ret;
11798#endif
11799#ifdef TARGET_NR_setresgid32
11800    case TARGET_NR_setresgid32:
11801        return get_errno(sys_setresgid(arg1, arg2, arg3));
11802#endif
11803#ifdef TARGET_NR_getresgid32
11804    case TARGET_NR_getresgid32:
11805        {
11806            gid_t rgid, egid, sgid;
11807            ret = get_errno(getresgid(&rgid, &egid, &sgid));
11808            if (!is_error(ret)) {
11809                if (put_user_u32(rgid, arg1)
11810                    || put_user_u32(egid, arg2)
11811                    || put_user_u32(sgid, arg3))
11812                    return -TARGET_EFAULT;
11813            }
11814        }
11815        return ret;
11816#endif
11817#ifdef TARGET_NR_chown32
11818    case TARGET_NR_chown32:
11819        if (!(p = lock_user_string(arg1)))
11820            return -TARGET_EFAULT;
11821        ret = get_errno(chown(p, arg2, arg3));
11822        unlock_user(p, arg1, 0);
11823        return ret;
11824#endif
11825#ifdef TARGET_NR_setuid32
11826    case TARGET_NR_setuid32:
11827        return get_errno(sys_setuid(arg1));
11828#endif
11829#ifdef TARGET_NR_setgid32
11830    case TARGET_NR_setgid32:
11831        return get_errno(sys_setgid(arg1));
11832#endif
11833#ifdef TARGET_NR_setfsuid32
11834    case TARGET_NR_setfsuid32:
11835        return get_errno(setfsuid(arg1));
11836#endif
11837#ifdef TARGET_NR_setfsgid32
11838    case TARGET_NR_setfsgid32:
11839        return get_errno(setfsgid(arg1));
11840#endif
11841#ifdef TARGET_NR_mincore
11842    case TARGET_NR_mincore:
11843        {
11844            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11845            if (!a) {
11846                return -TARGET_ENOMEM;
11847            }
11848            p = lock_user_string(arg3);
11849            if (!p) {
11850                ret = -TARGET_EFAULT;
11851            } else {
11852                ret = get_errno(mincore(a, arg2, p));
11853                unlock_user(p, arg3, ret);
11854            }
11855            unlock_user(a, arg1, 0);
11856        }
11857        return ret;
11858#endif
11859#ifdef TARGET_NR_arm_fadvise64_64
11860    case TARGET_NR_arm_fadvise64_64:
11861        /* arm_fadvise64_64 looks like fadvise64_64 but
11862         * with different argument order: fd, advice, offset, len
11863         * rather than the usual fd, offset, len, advice.
11864         * Note that offset and len are both 64-bit so appear as
11865         * pairs of 32-bit registers.
11866         */
11867        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11868                            target_offset64(arg5, arg6), arg2);
11869        return -host_to_target_errno(ret);
11870#endif
11871
11872#if TARGET_ABI_BITS == 32
11873
11874#ifdef TARGET_NR_fadvise64_64
11875    case TARGET_NR_fadvise64_64:
11876#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11877        /* 6 args: fd, advice, offset (high, low), len (high, low) */
11878        ret = arg2;
11879        arg2 = arg3;
11880        arg3 = arg4;
11881        arg4 = arg5;
11882        arg5 = arg6;
11883        arg6 = ret;
11884#else
11885        /* 6 args: fd, offset (high, low), len (high, low), advice */
11886        if (regpairs_aligned(cpu_env, num)) {
11887            /* offset is in (3,4), len in (5,6) and advice in 7 */
11888            arg2 = arg3;
11889            arg3 = arg4;
11890            arg4 = arg5;
11891            arg5 = arg6;
11892            arg6 = arg7;
11893        }
11894#endif
11895        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11896                            target_offset64(arg4, arg5), arg6);
11897        return -host_to_target_errno(ret);
11898#endif
11899
11900#ifdef TARGET_NR_fadvise64
11901    case TARGET_NR_fadvise64:
11902        /* 5 args: fd, offset (high, low), len, advice */
11903        if (regpairs_aligned(cpu_env, num)) {
11904            /* offset is in (3,4), len in 5 and advice in 6 */
11905            arg2 = arg3;
11906            arg3 = arg4;
11907            arg4 = arg5;
11908            arg5 = arg6;
11909        }
11910        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11911        return -host_to_target_errno(ret);
11912#endif
11913
11914#else /* not a 32-bit ABI */
11915#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11916#ifdef TARGET_NR_fadvise64_64
11917    case TARGET_NR_fadvise64_64:
11918#endif
11919#ifdef TARGET_NR_fadvise64
11920    case TARGET_NR_fadvise64:
11921#endif
11922#ifdef TARGET_S390X
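             /*
              * The s390x target ABI numbers POSIX_FADV_DONTNEED and
              * POSIX_FADV_NOREUSE as 6 and 7; remap those to the host values
              * and turn the otherwise-unused 4 and 5 into deliberately
              * invalid advice values.
              */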
11923        switch (arg4) {
11924        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11925        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11926        case 6: arg4 = POSIX_FADV_DONTNEED; break;
11927        case 7: arg4 = POSIX_FADV_NOREUSE; break;
11928        default: break;
11929        }
11930#endif
11931        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11932#endif
11933#endif /* end of 64-bit ABI fadvise handling */
11934
11935#ifdef TARGET_NR_madvise
11936    case TARGET_NR_madvise:
11937        /* A straight passthrough may not be safe because qemu sometimes
11938           turns private file-backed mappings into anonymous mappings.
11939           This will break MADV_DONTNEED.
11940           This is a hint, so ignoring and returning success is ok.  */
11941        return 0;
11942#endif
11943#ifdef TARGET_NR_fcntl64
11944    case TARGET_NR_fcntl64:
11945    {
11946        int cmd;
11947        struct flock64 fl;
11948        from_flock64_fn *copyfrom = copy_from_user_flock64;
11949        to_flock64_fn *copyto = copy_to_user_flock64;
11950
11951#ifdef TARGET_ARM
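             /* The old ARM OABI aligns 64-bit fields on 4 bytes, so its
              * struct flock64 layout differs from EABI; pick the matching
              * copy helpers. */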
11952        if (!((CPUARMState *)cpu_env)->eabi) {
11953            copyfrom = copy_from_user_oabi_flock64;
11954            copyto = copy_to_user_oabi_flock64;
11955        }
11956#endif
11957
11958        cmd = target_to_host_fcntl_cmd(arg2);
11959        if (cmd == -TARGET_EINVAL) {
11960            return cmd;
11961        }
11962
11963        switch(arg2) {
11964        case TARGET_F_GETLK64:
11965            ret = copyfrom(&fl, arg3);
11966            if (ret) {
11967                break;
11968            }
11969            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11970            if (ret == 0) {
11971                ret = copyto(arg3, &fl);
11972            }
11973            break;
11974
11975        case TARGET_F_SETLK64:
11976        case TARGET_F_SETLKW64:
11977            ret = copyfrom(&fl, arg3);
11978            if (ret) {
11979                break;
11980            }
11981            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11982            break;
11983        default:
11984            ret = do_fcntl(arg1, arg2, arg3);
11985            break;
11986        }
11987        return ret;
11988    }
11989#endif
11990#ifdef TARGET_NR_cacheflush
11991    case TARGET_NR_cacheflush:
11992        /* self-modifying code is handled automatically, so nothing needed */
11993        return 0;
11994#endif
11995#ifdef TARGET_NR_getpagesize
11996    case TARGET_NR_getpagesize:
11997        return TARGET_PAGE_SIZE;
11998#endif
11999    case TARGET_NR_gettid:
12000        return get_errno(sys_gettid());
12001#ifdef TARGET_NR_readahead
12002    case TARGET_NR_readahead:
12003#if TARGET_ABI_BITS == 32
12004        if (regpairs_aligned(cpu_env, num)) {
12005            arg2 = arg3;
12006            arg3 = arg4;
12007            arg4 = arg5;
12008        }
12009        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12010#else
12011        ret = get_errno(readahead(arg1, arg2, arg3));
12012#endif
12013        return ret;
12014#endif
12015#ifdef CONFIG_ATTR
12016#ifdef TARGET_NR_setxattr
12017    case TARGET_NR_listxattr:
12018    case TARGET_NR_llistxattr:
12019    {
12020        void *p, *b = 0;
12021        if (arg2) {
12022            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12023            if (!b) {
12024                return -TARGET_EFAULT;
12025            }
12026        }
12027        p = lock_user_string(arg1);
12028        if (p) {
12029            if (num == TARGET_NR_listxattr) {
12030                ret = get_errno(listxattr(p, b, arg3));
12031            } else {
12032                ret = get_errno(llistxattr(p, b, arg3));
12033            }
12034        } else {
12035            ret = -TARGET_EFAULT;
12036        }
12037        unlock_user(p, arg1, 0);
12038        unlock_user(b, arg2, arg3);
12039        return ret;
12040    }
12041    case TARGET_NR_flistxattr:
12042    {
12043        void *b = 0;
12044        if (arg2) {
12045            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12046            if (!b) {
12047                return -TARGET_EFAULT;
12048            }
12049        }
12050        ret = get_errno(flistxattr(arg1, b, arg3));
12051        unlock_user(b, arg2, arg3);
12052        return ret;
12053    }
12054    case TARGET_NR_setxattr:
12055    case TARGET_NR_lsetxattr:
12056        {
12057            void *p, *n, *v = 0;
12058            if (arg3) {
12059                v = lock_user(VERIFY_READ, arg3, arg4, 1);
12060                if (!v) {
12061                    return -TARGET_EFAULT;
12062                }
12063            }
12064            p = lock_user_string(arg1);
12065            n = lock_user_string(arg2);
12066            if (p && n) {
12067                if (num == TARGET_NR_setxattr) {
12068                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
12069                } else {
12070                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12071                }
12072            } else {
12073                ret = -TARGET_EFAULT;
12074            }
12075            unlock_user(p, arg1, 0);
12076            unlock_user(n, arg2, 0);
12077            unlock_user(v, arg3, 0);
12078        }
12079        return ret;
12080    case TARGET_NR_fsetxattr:
12081        {
12082            void *n, *v = 0;
12083            if (arg3) {
12084                v = lock_user(VERIFY_READ, arg3, arg4, 1);
12085                if (!v) {
12086                    return -TARGET_EFAULT;
12087                }
12088            }
12089            n = lock_user_string(arg2);
12090            if (n) {
12091                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12092            } else {
12093                ret = -TARGET_EFAULT;
12094            }
12095            unlock_user(n, arg2, 0);
12096            unlock_user(v, arg3, 0);
12097        }
12098        return ret;
12099    case TARGET_NR_getxattr:
12100    case TARGET_NR_lgetxattr:
12101        {
12102            void *p, *n, *v = 0;
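                 /* A NULL value buffer (used by the guest to query the size of
                  * an attribute) is passed through unchanged. */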
12103            if (arg3) {
12104                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12105                if (!v) {
12106                    return -TARGET_EFAULT;
12107                }
12108            }
12109            p = lock_user_string(arg1);
12110            n = lock_user_string(arg2);
12111            if (p && n) {
12112                if (num == TARGET_NR_getxattr) {
12113                    ret = get_errno(getxattr(p, n, v, arg4));
12114                } else {
12115                    ret = get_errno(lgetxattr(p, n, v, arg4));
12116                }
12117            } else {
12118                ret = -TARGET_EFAULT;
12119            }
12120            unlock_user(p, arg1, 0);
12121            unlock_user(n, arg2, 0);
12122            unlock_user(v, arg3, arg4);
12123        }
12124        return ret;
12125    case TARGET_NR_fgetxattr:
12126        {
12127            void *n, *v = 0;
12128            if (arg3) {
12129                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12130                if (!v) {
12131                    return -TARGET_EFAULT;
12132                }
12133            }
12134            n = lock_user_string(arg2);
12135            if (n) {
12136                ret = get_errno(fgetxattr(arg1, n, v, arg4));
12137            } else {
12138                ret = -TARGET_EFAULT;
12139            }
12140            unlock_user(n, arg2, 0);
12141            unlock_user(v, arg3, arg4);
12142        }
12143        return ret;
12144    case TARGET_NR_removexattr:
12145    case TARGET_NR_lremovexattr:
12146        {
12147            void *p, *n;
12148            p = lock_user_string(arg1);
12149            n = lock_user_string(arg2);
12150            if (p && n) {
12151                if (num == TARGET_NR_removexattr) {
12152                    ret = get_errno(removexattr(p, n));
12153                } else {
12154                    ret = get_errno(lremovexattr(p, n));
12155                }
12156            } else {
12157                ret = -TARGET_EFAULT;
12158            }
12159            unlock_user(p, arg1, 0);
12160            unlock_user(n, arg2, 0);
12161        }
12162        return ret;
12163    case TARGET_NR_fremovexattr:
12164        {
12165            void *n;
12166            n = lock_user_string(arg2);
12167            if (n) {
12168                ret = get_errno(fremovexattr(arg1, n));
12169            } else {
12170                ret = -TARGET_EFAULT;
12171            }
12172            unlock_user(n, arg2, 0);
12173        }
12174        return ret;
12175#endif
12176#endif /* CONFIG_ATTR */
12177#ifdef TARGET_NR_set_thread_area
12178    case TARGET_NR_set_thread_area:
12179#if defined(TARGET_MIPS)
12180      ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12181      return 0;
12182#elif defined(TARGET_CRIS)
12183      if (arg1 & 0xff)
12184          ret = -TARGET_EINVAL;
12185      else {
12186          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12187          ret = 0;
12188      }
12189      return ret;
12190#elif defined(TARGET_I386) && defined(TARGET_ABI32)
12191      return do_set_thread_area(cpu_env, arg1);
12192#elif defined(TARGET_M68K)
12193      {
12194          TaskState *ts = cpu->opaque;
12195          ts->tp_value = arg1;
12196          return 0;
12197      }
12198#else
12199      return -TARGET_ENOSYS;
12200#endif
12201#endif
12202#ifdef TARGET_NR_get_thread_area
12203    case TARGET_NR_get_thread_area:
12204#if defined(TARGET_I386) && defined(TARGET_ABI32)
12205        return do_get_thread_area(cpu_env, arg1);
12206#elif defined(TARGET_M68K)
12207        {
12208            TaskState *ts = cpu->opaque;
12209            return ts->tp_value;
12210        }
12211#else
12212        return -TARGET_ENOSYS;
12213#endif
12214#endif
12215#ifdef TARGET_NR_getdomainname
12216    case TARGET_NR_getdomainname:
12217        return -TARGET_ENOSYS;
12218#endif
12219
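         /*
          * Clock syscalls: the guest timespec is converted to the host layout
          * before the host clock_*() call and converted back afterwards.  The
          * *_time64 variants differ only in using the 64-bit time_t guest
          * layout (the *_timespec64 helpers) so that 32-bit guests can use
          * either interface.
          */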
12220#ifdef TARGET_NR_clock_settime
12221    case TARGET_NR_clock_settime:
12222    {
12223        struct timespec ts;
12224
12225        ret = target_to_host_timespec(&ts, arg2);
12226        if (!is_error(ret)) {
12227            ret = get_errno(clock_settime(arg1, &ts));
12228        }
12229        return ret;
12230    }
12231#endif
12232#ifdef TARGET_NR_clock_settime64
12233    case TARGET_NR_clock_settime64:
12234    {
12235        struct timespec ts;
12236
12237        ret = target_to_host_timespec64(&ts, arg2);
12238        if (!is_error(ret)) {
12239            ret = get_errno(clock_settime(arg1, &ts));
12240        }
12241        return ret;
12242    }
12243#endif
12244#ifdef TARGET_NR_clock_gettime
12245    case TARGET_NR_clock_gettime:
12246    {
12247        struct timespec ts;
12248        ret = get_errno(clock_gettime(arg1, &ts));
12249        if (!is_error(ret)) {
12250            ret = host_to_target_timespec(arg2, &ts);
12251        }
12252        return ret;
12253    }
12254#endif
12255#ifdef TARGET_NR_clock_gettime64
12256    case TARGET_NR_clock_gettime64:
12257    {
12258        struct timespec ts;
12259        ret = get_errno(clock_gettime(arg1, &ts));
12260        if (!is_error(ret)) {
12261            ret = host_to_target_timespec64(arg2, &ts);
12262        }
12263        return ret;
12264    }
12265#endif
12266#ifdef TARGET_NR_clock_getres
12267    case TARGET_NR_clock_getres:
12268    {
12269        struct timespec ts;
12270        ret = get_errno(clock_getres(arg1, &ts));
12271        if (!is_error(ret) && arg2 && host_to_target_timespec(arg2, &ts)) {
12272            return -TARGET_EFAULT;
12273        }
12274        return ret;
12275    }
12276#endif
12277#ifdef TARGET_NR_clock_getres_time64
12278    case TARGET_NR_clock_getres_time64:
12279    {
12280        struct timespec ts;
12281        ret = get_errno(clock_getres(arg1, &ts));
12282        if (!is_error(ret) && arg2 && host_to_target_timespec64(arg2, &ts)) {
12283            return -TARGET_EFAULT;
12284        }
12285        return ret;
12286    }
12287#endif
12288#ifdef TARGET_NR_clock_nanosleep
12289    case TARGET_NR_clock_nanosleep:
12290    {
12291        struct timespec ts;
12292        if (target_to_host_timespec(&ts, arg3)) {
12293            return -TARGET_EFAULT;
12294        }
12295        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12296                                             &ts, arg4 ? &ts : NULL));
12297        /*
12298         * If the call is interrupted by a signal handler, it fails with
12299         * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
12300         * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12301         */
12302        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12303            host_to_target_timespec(arg4, &ts)) {
12304              return -TARGET_EFAULT;
12305        }
12306
12307        return ret;
12308    }
12309#endif
12310#ifdef TARGET_NR_clock_nanosleep_time64
12311    case TARGET_NR_clock_nanosleep_time64:
12312    {
12313        struct timespec ts;
12314
12315        if (target_to_host_timespec64(&ts, arg3)) {
12316            return -TARGET_EFAULT;
12317        }
12318
12319        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12320                                             &ts, arg4 ? &ts : NULL));
12321
12322        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12323            host_to_target_timespec64(arg4, &ts)) {
12324            return -TARGET_EFAULT;
12325        }
12326        return ret;
12327    }
12328#endif
12329
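         /*
          * set_tid_address is forwarded directly: g2h() turns the guest
          * address of the clear_child_tid word into a host pointer, so the
          * host kernel itself clears and futex-wakes it on thread exit.
          */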
12330#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12331    case TARGET_NR_set_tid_address:
12332        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12333#endif
12334
12335    case TARGET_NR_tkill:
12336        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12337
12338    case TARGET_NR_tgkill:
12339        return get_errno(safe_tgkill((int)arg1, (int)arg2,
12340                         target_to_host_signal(arg3)));
12341
12342#ifdef TARGET_NR_set_robust_list
12343    case TARGET_NR_set_robust_list:
12344    case TARGET_NR_get_robust_list:
12345        /* The ABI for supporting robust futexes has userspace pass
12346         * the kernel a pointer to a linked list which is updated by
12347         * userspace after the syscall; the list is walked by the kernel
12348         * when the thread exits. Since the linked list in QEMU guest
12349         * memory isn't a valid linked list for the host and we have
12350         * no way to reliably intercept the thread-death event, we can't
12351         * support these. Silently return ENOSYS so that guest userspace
12352         * falls back to a non-robust futex implementation (which should
12353         * be OK except in the corner case of the guest crashing while
12354         * holding a mutex that is shared with another process via
12355         * shared memory).
12356         */
12357        return -TARGET_ENOSYS;
12358#endif
12359
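         /*
          * utimensat takes an array of two timespecs (atime, mtime) read from
          * guest memory at arg3; a zero arg3 is passed through as a NULL
          * times pointer, which means "set both to the current time".  A zero
          * arg2 (pathname) is likewise passed through so the fd-only form
          * still works.
          */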
12360#if defined(TARGET_NR_utimensat)
12361    case TARGET_NR_utimensat:
12362        {
12363            struct timespec *tsp, ts[2];
12364            if (!arg3) {
12365                tsp = NULL;
12366            } else {
12367                if (target_to_host_timespec(ts, arg3)) {
12368                    return -TARGET_EFAULT;
12369                }
12370                if (target_to_host_timespec(ts + 1, arg3 +
12371                                            sizeof(struct target_timespec))) {
12372                    return -TARGET_EFAULT;
12373                }
12374                tsp = ts;
12375            }
12376            if (!arg2) {
12377                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12378            } else {
12379                if (!(p = lock_user_string(arg2))) {
12380                    return -TARGET_EFAULT;
12381                }
12382                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12383                unlock_user(p, arg2, 0);
12384            }
12385        }
12386        return ret;
12387#endif
12388#ifdef TARGET_NR_utimensat_time64
12389    case TARGET_NR_utimensat_time64:
12390        {
12391            struct timespec *tsp, ts[2];
12392            if (!arg3) {
12393                tsp = NULL;
12394            } else {
12395                if (target_to_host_timespec64(ts, arg3)) {
12396                    return -TARGET_EFAULT;
12397                }
12398                if (target_to_host_timespec64(ts + 1, arg3 +
12399                                     sizeof(struct target__kernel_timespec))) {
12400                    return -TARGET_EFAULT;
12401                }
12402                tsp = ts;
12403            }
12404            if (!arg2) {
12405                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12406            } else {
12407                p = lock_user_string(arg2);
12408                if (!p) {
12409                    return -TARGET_EFAULT;
12410                }
12411                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12412                unlock_user(p, arg2, 0);
12413            }
12414        }
12415        return ret;
12416#endif
12417#ifdef TARGET_NR_futex
12418    case TARGET_NR_futex:
12419        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12420#endif
12421#ifdef TARGET_NR_futex_time64
12422    case TARGET_NR_futex_time64:
12423        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12424#endif
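         /*
          * inotify descriptors are registered with fd_trans_register() so
          * that later read()s on them can convert the host inotify_event
          * records into the guest's expected byte order and layout.
          */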
12425#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12426    case TARGET_NR_inotify_init:
12427        ret = get_errno(sys_inotify_init());
12428        if (ret >= 0) {
12429            fd_trans_register(ret, &target_inotify_trans);
12430        }
12431        return ret;
12432#endif
12433#ifdef CONFIG_INOTIFY1
12434#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12435    case TARGET_NR_inotify_init1:
12436        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12437                                          fcntl_flags_tbl)));
12438        if (ret >= 0) {
12439            fd_trans_register(ret, &target_inotify_trans);
12440        }
12441        return ret;
12442#endif
12443#endif
12444#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12445    case TARGET_NR_inotify_add_watch:
12446        p = lock_user_string(arg2);
             if (!p) {
                 return -TARGET_EFAULT;
             }
12447        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12448        unlock_user(p, arg2, 0);
12449        return ret;
12450#endif
12451#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12452    case TARGET_NR_inotify_rm_watch:
12453        return get_errno(sys_inotify_rm_watch(arg1, arg2));
12454#endif
12455
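         /*
          * POSIX message queues: the open flags are translated with
          * target_to_host_bitmask()/fcntl_flags_tbl, and the optional
          * struct mq_attr is copied in from guest memory before calling the
          * host mq_open().
          */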
12456#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12457    case TARGET_NR_mq_open:
12458        {
12459            struct mq_attr posix_mq_attr;
12460            struct mq_attr *pposix_mq_attr;
12461            int host_flags;
12462
12463            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12464            pposix_mq_attr = NULL;
12465            if (arg4) {
12466                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12467                    return -TARGET_EFAULT;
12468                }
12469                pposix_mq_attr = &posix_mq_attr;
12470            }
12471            p = lock_user_string(arg1 - 1);
12472            if (!p) {
12473                return -TARGET_EFAULT;
12474            }
12475            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12476            unlock_user(p, arg1, 0);
12477        }
12478        return ret;
12479
12480    case TARGET_NR_mq_unlink:
12481        p = lock_user_string(arg1 - 1);
12482        if (!p) {
12483            return -TARGET_EFAULT;
12484        }
12485        ret = get_errno(mq_unlink(p));
12486        unlock_user(p, arg1, 0);
12487        return ret;
12488
12489#ifdef TARGET_NR_mq_timedsend
12490    case TARGET_NR_mq_timedsend:
12491        {
12492            struct timespec ts;
12493
12494            p = lock_user(VERIFY_READ, arg2, arg3, 1);
                 if (!p) {
                     return -TARGET_EFAULT;
                 }
12495            if (arg5 != 0) {
12496                if (target_to_host_timespec(&ts, arg5)) {
12497                    return -TARGET_EFAULT;
12498                }
12499                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12500                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12501                    return -TARGET_EFAULT;
12502                }
12503            } else {
12504                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12505            }
12506            unlock_user(p, arg2, arg3);
12507        }
12508        return ret;
12509#endif
12510#ifdef TARGET_NR_mq_timedsend_time64
12511    case TARGET_NR_mq_timedsend_time64:
12512        {
12513            struct timespec ts;
12514
12515            p = lock_user(VERIFY_READ, arg2, arg3, 1);
                 if (!p) {
                     return -TARGET_EFAULT;
                 }
12516            if (arg5 != 0) {
12517                if (target_to_host_timespec64(&ts, arg5)) {
12518                    return -TARGET_EFAULT;
12519                }
12520                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12521                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12522                    return -TARGET_EFAULT;
12523                }
12524            } else {
12525                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12526            }
12527            unlock_user(p, arg2, arg3);
12528        }
12529        return ret;
12530#endif
12531
12532#ifdef TARGET_NR_mq_timedreceive
12533    case TARGET_NR_mq_timedreceive:
12534        {
12535            struct timespec ts;
12536            unsigned int prio;
12537
12538            p = lock_user(VERIFY_READ, arg2, arg3, 1);
                 if (!p) {
                     return -TARGET_EFAULT;
                 }
12539            if (arg5 != 0) {
12540                if (target_to_host_timespec(&ts, arg5)) {
12541                    return -TARGET_EFAULT;
12542                }
12543                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12544                                                     &prio, &ts));
12545                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12546                    return -TARGET_EFAULT;
12547                }
12548            } else {
12549                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12550                                                     &prio, NULL));
12551            }
12552            unlock_user(p, arg2, arg3);
12553            if (arg4 != 0) {
12554                put_user_u32(prio, arg4);
                 }
12555        }
12556        return ret;
12557#endif
12558#ifdef TARGET_NR_mq_timedreceive_time64
12559    case TARGET_NR_mq_timedreceive_time64:
12560        {
12561            struct timespec ts;
12562            unsigned int prio;
12563
12564            p = lock_user(VERIFY_READ, arg2, arg3, 1);
                 if (!p) {
                     return -TARGET_EFAULT;
                 }
12565            if (arg5 != 0) {
12566                if (target_to_host_timespec64(&ts, arg5)) {
12567                    return -TARGET_EFAULT;
12568                }
12569                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12570                                                     &prio, &ts));
12571                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12572                    return -TARGET_EFAULT;
12573                }
12574            } else {
12575                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12576                                                     &prio, NULL));
12577            }
12578            unlock_user(p, arg2, arg3);
12579            if (arg4 != 0) {
12580                put_user_u32(prio, arg4);
12581            }
12582        }
12583        return ret;
12584#endif
12585
12586    /* Not implemented for now... */
12587/*     case TARGET_NR_mq_notify: */
12588/*         break; */
12589
12590    case TARGET_NR_mq_getsetattr:
12591        {
12592            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12593            ret = 0;
12594            if (arg2 != 0) {
12595                if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                         return -TARGET_EFAULT;
                     }
12596                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12597                                           &posix_mq_attr_out));
12598            } else if (arg3 != 0) {
12599                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12600            }
12601            if (ret == 0 && arg3 != 0) {
12602                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12603            }
12604        }
12605        return ret;
12606#endif
12607
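         /*
          * tee/splice/vmsplice: the optional loff_t in/out offsets live in
          * guest memory, so they are read with get_user_u64(), passed to the
          * host syscall by pointer, and written back with put_user_u64() so
          * the guest sees the updated offsets.
          */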
12608#ifdef CONFIG_SPLICE
12609#ifdef TARGET_NR_tee
12610    case TARGET_NR_tee:
12611        {
12612            ret = get_errno(tee(arg1, arg2, arg3, arg4));
12613        }
12614        return ret;
12615#endif
12616#ifdef TARGET_NR_splice
12617    case TARGET_NR_splice:
12618        {
12619            loff_t loff_in, loff_out;
12620            loff_t *ploff_in = NULL, *ploff_out = NULL;
12621            if (arg2) {
12622                if (get_user_u64(loff_in, arg2)) {
12623                    return -TARGET_EFAULT;
12624                }
12625                ploff_in = &loff_in;
12626            }
12627            if (arg4) {
12628                if (get_user_u64(loff_out, arg4)) {
12629                    return -TARGET_EFAULT;
12630                }
12631                ploff_out = &loff_out;
12632            }
12633            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12634            if (arg2) {
12635                if (put_user_u64(loff_in, arg2)) {
12636                    return -TARGET_EFAULT;
12637                }
12638            }
12639            if (arg4) {
12640                if (put_user_u64(loff_out, arg4)) {
12641                    return -TARGET_EFAULT;
12642                }
12643            }
12644        }
12645        return ret;
12646#endif
12647#ifdef TARGET_NR_vmsplice
12648    case TARGET_NR_vmsplice:
12649        {
12650            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12651            if (vec != NULL) {
12652                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12653                unlock_iovec(vec, arg2, arg3, 0);
12654            } else {
12655                ret = -host_to_target_errno(errno);
12656            }
12657        }
12658        return ret;
12659#endif
12660#endif /* CONFIG_SPLICE */
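         /*
          * eventfd2: only the TARGET_O_NONBLOCK and TARGET_O_CLOEXEC bits
          * need translating; they are rewritten as the host O_NONBLOCK /
          * O_CLOEXEC values (which double as EFD_NONBLOCK / EFD_CLOEXEC on
          * Linux) and any remaining bits are passed through unchanged.  The
          * new descriptor is registered with fd_trans_register() so reads of
          * the 8-byte counter can be byte-swapped for cross-endian guests.
          */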
12661#ifdef CONFIG_EVENTFD
12662#if defined(TARGET_NR_eventfd)
12663    case TARGET_NR_eventfd:
12664        ret = get_errno(eventfd(arg1, 0));
12665        if (ret >= 0) {
12666            fd_trans_register(ret, &target_eventfd_trans);
12667        }
12668        return ret;
12669#endif
12670#if defined(TARGET_NR_eventfd2)
12671    case TARGET_NR_eventfd2:
12672    {
12673        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12674        if (arg2 & TARGET_O_NONBLOCK) {
12675            host_flags |= O_NONBLOCK;
12676        }
12677        if (arg2 & TARGET_O_CLOEXEC) {
12678            host_flags |= O_CLOEXEC;
12679        }
12680        ret = get_errno(eventfd(arg1, host_flags));
12681        if (ret >= 0) {
12682            fd_trans_register(ret, &target_eventfd_trans);
12683        }
12684        return ret;
12685    }
12686#endif
12687#endif /* CONFIG_EVENTFD  */
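         /*
          * fallocate takes 64-bit offsets; on 32-bit ABIs each offset arrives
          * split across two registers and is reassembled with
          * target_offset64() before the host call.
          */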
12688#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12689    case TARGET_NR_fallocate:
12690#if TARGET_ABI_BITS == 32
12691        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12692                                  target_offset64(arg5, arg6)));
12693#else
12694        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12695#endif
12696        return ret;
12697#endif
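         /*
          * sync_file_range: on 32-bit MIPS the ABI inserts a padding argument
          * after the fd so the 64-bit offset halves stay register-pair
          * aligned, which is why the MIPS variant takes its offsets from
          * arg3..arg6 and its flags from arg7.  sync_file_range2 solves the
          * same alignment problem by moving the flags up to arg2.
          */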
12698#if defined(CONFIG_SYNC_FILE_RANGE)
12699#if defined(TARGET_NR_sync_file_range)
12700    case TARGET_NR_sync_file_range:
12701#if TARGET_ABI_BITS == 32
12702#if defined(TARGET_MIPS)
12703        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12704                                        target_offset64(arg5, arg6), arg7));
12705#else
12706        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12707                                        target_offset64(arg4, arg5), arg6));
12708#endif /* !TARGET_MIPS */
12709#else
12710        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12711#endif
12712        return ret;
12713#endif
12714#if defined(TARGET_NR_sync_file_range2) || \
12715    defined(TARGET_NR_arm_sync_file_range)
12716#if defined(TARGET_NR_sync_file_range2)
12717    case TARGET_NR_sync_file_range2:
12718#endif
12719#if defined(TARGET_NR_arm_sync_file_range)
12720    case TARGET_NR_arm_sync_file_range:
12721#endif
12722        /* This is like sync_file_range but the arguments are reordered */
12723#if TARGET_ABI_BITS == 32
12724        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12725                                        target_offset64(arg5, arg6), arg2));
12726#else
12727        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12728#endif
12729        return ret;
12730#endif
12731#endif
12732#if defined(TARGET_NR_signalfd4)
12733    case TARGET_NR_signalfd4:
12734        return do_signalfd4(arg1, arg2, arg4);
12735#endif
12736#if defined(TARGET_NR_signalfd)
12737    case TARGET_NR_signalfd:
12738        return do_signalfd4(arg1, arg2, 0);
12739#endif
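         /*
          * epoll: struct epoll_event differs between guest and host, so the
          * events are converted field by field -- 'events' with tswap32() and
          * the opaque epoll_data_t payload as a raw 64-bit value with
          * tswap64().
          */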
12740#if defined(CONFIG_EPOLL)
12741#if defined(TARGET_NR_epoll_create)
12742    case TARGET_NR_epoll_create:
12743        return get_errno(epoll_create(arg1));
12744#endif
12745#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12746    case TARGET_NR_epoll_create1:
12747        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12748#endif
12749#if defined(TARGET_NR_epoll_ctl)
12750    case TARGET_NR_epoll_ctl:
12751    {
12752        struct epoll_event ep;
12753        struct epoll_event *epp = 0;
12754        if (arg4) {
12755            if (arg2 != EPOLL_CTL_DEL) {
12756                struct target_epoll_event *target_ep;
12757                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12758                    return -TARGET_EFAULT;
12759                }
12760                ep.events = tswap32(target_ep->events);
12761                /*
12762                 * The epoll_data_t union is just opaque data to the kernel,
12763                 * so we transfer all 64 bits across and need not worry what
12764                 * actual data type it is.
12765                 */
12766                ep.data.u64 = tswap64(target_ep->data.u64);
12767                unlock_user_struct(target_ep, arg4, 0);
12768            }
12769            /*
12770             * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12771             * non-null pointer, even though this argument is ignored.
12772             */
12774            epp = &ep;
12775        }
12776        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12777    }
12778#endif
12779
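         /*
          * epoll_wait and epoll_pwait share one implementation: the guest's
          * event array is locked for writing, a host-sized bounce buffer is
          * allocated with g_try_new(), safe_epoll_pwait() fills it (with the
          * converted guest sigset for the pwait flavour), and only the 'ret'
          * entries actually produced are converted and copied back.
          */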
12780#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12781#if defined(TARGET_NR_epoll_wait)
12782    case TARGET_NR_epoll_wait:
12783#endif
12784#if defined(TARGET_NR_epoll_pwait)
12785    case TARGET_NR_epoll_pwait:
12786#endif
12787    {
12788        struct target_epoll_event *target_ep;
12789        struct epoll_event *ep;
12790        int epfd = arg1;
12791        int maxevents = arg3;
12792        int timeout = arg4;
12793
12794        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12795            return -TARGET_EINVAL;
12796        }
12797
12798        target_ep = lock_user(VERIFY_WRITE, arg2,
12799                              maxevents * sizeof(struct target_epoll_event), 1);
12800        if (!target_ep) {
12801            return -TARGET_EFAULT;
12802        }
12803
12804        ep = g_try_new(struct epoll_event, maxevents);
12805        if (!ep) {
12806            unlock_user(target_ep, arg2, 0);
12807            return -TARGET_ENOMEM;
12808        }
12809
12810        switch (num) {
12811#if defined(TARGET_NR_epoll_pwait)
12812        case TARGET_NR_epoll_pwait:
12813        {
12814            target_sigset_t *target_set;
12815            sigset_t _set, *set = &_set;
12816
12817            if (arg5) {
12818                if (arg6 != sizeof(target_sigset_t)) {
12819                    ret = -TARGET_EINVAL;
12820                    break;
12821                }
12822
12823                target_set = lock_user(VERIFY_READ, arg5,
12824                                       sizeof(target_sigset_t), 1);
12825                if (!target_set) {
12826                    ret = -TARGET_EFAULT;
12827                    break;
12828                }
12829                target_to_host_sigset(set, target_set);
12830                unlock_user(target_set, arg5, 0);
12831            } else {
12832                set = NULL;
12833            }
12834
12835            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12836                                             set, SIGSET_T_SIZE));
12837            break;
12838        }
12839#endif
12840#if defined(TARGET_NR_epoll_wait)
12841        case TARGET_NR_epoll_wait:
12842            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12843                                             NULL, 0));
12844            break;
12845#endif
12846        default:
12847            ret = -TARGET_ENOSYS;
12848        }
12849        if (!is_error(ret)) {
12850            int i;
12851            for (i = 0; i < ret; i++) {
12852                target_ep[i].events = tswap32(ep[i].events);
12853                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12854            }
12855            unlock_user(target_ep, arg2,
12856                        ret * sizeof(struct target_epoll_event));
12857        } else {
12858            unlock_user(target_ep, arg2, 0);
12859        }
12860        g_free(ep);
12861        return ret;
12862    }
12863#endif
12864#endif
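         /*
          * prlimit64: new limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK
          * are not forwarded to the host, because host rlimits apply to the
          * whole QEMU process rather than just the guest; for those resources
          * only the old limit is queried and returned.
          */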
12865#ifdef TARGET_NR_prlimit64
12866    case TARGET_NR_prlimit64:
12867    {
12868        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12869        struct target_rlimit64 *target_rnew, *target_rold;
12870        struct host_rlimit64 rnew, rold, *rnewp = 0;
12871        int resource = target_to_host_resource(arg2);
12872
12873        if (arg3 && (resource != RLIMIT_AS &&
12874                     resource != RLIMIT_DATA &&
12875                     resource != RLIMIT_STACK)) {
12876            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12877                return -TARGET_EFAULT;
12878            }
12879            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12880            rnew.rlim_max = tswap64(target_rnew->rlim_max);
12881            unlock_user_struct(target_rnew, arg3, 0);
12882            rnewp = &rnew;
12883        }
12884
12885        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12886        if (!is_error(ret) && arg4) {
12887            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12888                return -TARGET_EFAULT;
12889            }
12890            target_rold->rlim_cur = tswap64(rold.rlim_cur);
12891            target_rold->rlim_max = tswap64(rold.rlim_max);
12892            unlock_user_struct(target_rold, arg4, 1);
12893        }
12894        return ret;
12895    }
12896#endif
12897#ifdef TARGET_NR_gethostname
12898    case TARGET_NR_gethostname:
12899    {
12900        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12901        if (name) {
12902            ret = get_errno(gethostname(name, arg2));
12903            unlock_user(name, arg1, arg2);
12904        } else {
12905            ret = -TARGET_EFAULT;
12906        }
12907        return ret;
12908    }
12909#endif
12910#ifdef TARGET_NR_atomic_cmpxchg_32
12911    case TARGET_NR_atomic_cmpxchg_32:
12912    {
12913        /* should use start_exclusive from main.c */
12914        abi_ulong mem_value;
12915        if (get_user_u32(mem_value, arg6)) {
12916            target_siginfo_t info;
12917            info.si_signo = SIGSEGV;
12918            info.si_errno = 0;
12919            info.si_code = TARGET_SEGV_MAPERR;
12920            info._sifields._sigfault._addr = arg6;
12921            queue_signal((CPUArchState *)cpu_env, info.si_signo,
12922                         QEMU_SI_FAULT, &info);
12923            ret = 0xdeadbeef;
12924
12925        }
12926        if (mem_value == arg2) {
12927            put_user_u32(arg1, arg6);
             }
12928        return mem_value;
12929    }
12930#endif
12931#ifdef TARGET_NR_atomic_barrier
12932    case TARGET_NR_atomic_barrier:
12933        /* Like the kernel implementation and the qemu arm barrier,
12934           treat this as a no-op. */
12935        return 0;
12936#endif
12937
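         /*
          * POSIX timers: host timer_t handles are kept in the g_posix_timers
          * table.  The id handed back to the guest is TIMER_MAGIC ORed with
          * the table index, and get_timer_id() validates that encoding and
          * recovers the index for the other timer_* syscalls below.
          */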
12938#ifdef TARGET_NR_timer_create
12939    case TARGET_NR_timer_create:
12940    {
12941        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12942
12943        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12944
12945        int clkid = arg1;
12946        int timer_index = next_free_host_timer();
12947
12948        if (timer_index < 0) {
12949            ret = -TARGET_EAGAIN;
12950        } else {
12951            timer_t *phtimer = g_posix_timers + timer_index;
12952
12953            if (arg2) {
12954                phost_sevp = &host_sevp;
12955                ret = target_to_host_sigevent(phost_sevp, arg2);
12956                if (ret != 0) {
12957                    return ret;
12958                }
12959            }
12960
12961            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12962            if (ret) {
12963                phtimer = NULL;
12964            } else {
12965                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12966                    return -TARGET_EFAULT;
12967                }
12968            }
12969        }
12970        return ret;
12971    }
12972#endif
12973
12974#ifdef TARGET_NR_timer_settime
12975    case TARGET_NR_timer_settime:
12976    {
12977        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12978         * struct itimerspec *old_value */
12979        target_timer_t timerid = get_timer_id(arg1);
12980
12981        if (timerid < 0) {
12982            ret = timerid;
12983        } else if (arg3 == 0) {
12984            ret = -TARGET_EINVAL;
12985        } else {
12986            timer_t htimer = g_posix_timers[timerid];
12987            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12988
12989            if (target_to_host_itimerspec(&hspec_new, arg3)) {
12990                return -TARGET_EFAULT;
12991            }
12992            ret = get_errno(
12993                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12994            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12995                return -TARGET_EFAULT;
12996            }
12997        }
12998        return ret;
12999    }
13000#endif
13001
13002#ifdef TARGET_NR_timer_settime64
13003    case TARGET_NR_timer_settime64:
13004    {
13005        target_timer_t timerid = get_timer_id(arg1);
13006
13007        if (timerid < 0) {
13008            ret = timerid;
13009        } else if (arg3 == 0) {
13010            ret = -TARGET_EINVAL;
13011        } else {
13012            timer_t htimer = g_posix_timers[timerid];
13013            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13014
13015            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13016                return -TARGET_EFAULT;
13017            }
13018            ret = get_errno(
13019                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13020            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13021                return -TARGET_EFAULT;
13022            }
13023        }
13024        return ret;
13025    }
13026#endif
13027
13028#ifdef TARGET_NR_timer_gettime
13029    case TARGET_NR_timer_gettime:
13030    {
13031        /* args: timer_t timerid, struct itimerspec *curr_value */
13032        target_timer_t timerid = get_timer_id(arg1);
13033
13034        if (timerid < 0) {
13035            ret = timerid;
13036        } else if (!arg2) {
13037            ret = -TARGET_EFAULT;
13038        } else {
13039            timer_t htimer = g_posix_timers[timerid];
13040            struct itimerspec hspec;
13041            ret = get_errno(timer_gettime(htimer, &hspec));
13042
13043            if (host_to_target_itimerspec(arg2, &hspec)) {
13044                ret = -TARGET_EFAULT;
13045            }
13046        }
13047        return ret;
13048    }
13049#endif
13050
13051#ifdef TARGET_NR_timer_gettime64
13052    case TARGET_NR_timer_gettime64:
13053    {
13054        /* args: timer_t timerid, struct itimerspec64 *curr_value */
13055        target_timer_t timerid = get_timer_id(arg1);
13056
13057        if (timerid < 0) {
13058            ret = timerid;
13059        } else if (!arg2) {
13060            ret = -TARGET_EFAULT;
13061        } else {
13062            timer_t htimer = g_posix_timers[timerid];
13063            struct itimerspec hspec;
13064            ret = get_errno(timer_gettime(htimer, &hspec));
13065
13066            if (host_to_target_itimerspec64(arg2, &hspec)) {
13067                ret = -TARGET_EFAULT;
13068            }
13069        }
13070        return ret;
13071    }
13072#endif
13073
13074#ifdef TARGET_NR_timer_getoverrun
13075    case TARGET_NR_timer_getoverrun:
13076    {
13077        /* args: timer_t timerid */
13078        target_timer_t timerid = get_timer_id(arg1);
13079
13080        if (timerid < 0) {
13081            ret = timerid;
13082        } else {
13083            timer_t htimer = g_posix_timers[timerid];
13084            ret = get_errno(timer_getoverrun(htimer));
13085        }
13086        return ret;
13087    }
13088#endif
13089
13090#ifdef TARGET_NR_timer_delete
13091    case TARGET_NR_timer_delete:
13092    {
13093        /* args: timer_t timerid */
13094        target_timer_t timerid = get_timer_id(arg1);
13095
13096        if (timerid < 0) {
13097            ret = timerid;
13098        } else {
13099            timer_t htimer = g_posix_timers[timerid];
13100            ret = get_errno(timer_delete(htimer));
13101            g_posix_timers[timerid] = 0;
13102        }
13103        return ret;
13104    }
13105#endif
13106
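         /*
          * timerfd: the creation flags go through fcntl_flags_tbl like other
          * fd flags, while the itimerspec values reuse the same itimerspec /
          * itimerspec64 conversion helpers as the POSIX timers above.
          */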
13107#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13108    case TARGET_NR_timerfd_create:
13109        return get_errno(timerfd_create(arg1,
13110                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13111#endif
13112
13113#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13114    case TARGET_NR_timerfd_gettime:
13115        {
13116            struct itimerspec its_curr;
13117
13118            ret = get_errno(timerfd_gettime(arg1, &its_curr));
13119
13120            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13121                return -TARGET_EFAULT;
13122            }
13123        }
13124        return ret;
13125#endif
13126
13127#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13128    case TARGET_NR_timerfd_gettime64:
13129        {
13130            struct itimerspec its_curr;
13131
13132            ret = get_errno(timerfd_gettime(arg1, &its_curr));
13133
13134            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13135                return -TARGET_EFAULT;
13136            }
13137        }
13138        return ret;
13139#endif
13140
13141#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13142    case TARGET_NR_timerfd_settime:
13143        {
13144            struct itimerspec its_new, its_old, *p_new;
13145
13146            if (arg3) {
13147                if (target_to_host_itimerspec(&its_new, arg3)) {
13148                    return -TARGET_EFAULT;
13149                }
13150                p_new = &its_new;
13151            } else {
13152                p_new = NULL;
13153            }
13154
13155            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13156
13157            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13158                return -TARGET_EFAULT;
13159            }
13160        }
13161        return ret;
13162#endif
13163
13164#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13165    case TARGET_NR_timerfd_settime64:
13166        {
13167            struct itimerspec its_new, its_old, *p_new;
13168
13169            if (arg3) {
13170                if (target_to_host_itimerspec64(&its_new, arg3)) {
13171                    return -TARGET_EFAULT;
13172                }
13173                p_new = &its_new;
13174            } else {
13175                p_new = NULL;
13176            }
13177
13178            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13179
13180            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13181                return -TARGET_EFAULT;
13182            }
13183        }
13184        return ret;
13185#endif
13186
13187#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13188    case TARGET_NR_ioprio_get:
13189        return get_errno(ioprio_get(arg1, arg2));
13190#endif
13191
13192#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13193    case TARGET_NR_ioprio_set:
13194        return get_errno(ioprio_set(arg1, arg2, arg3));
13195#endif
13196
13197#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13198    case TARGET_NR_setns:
13199        return get_errno(setns(arg1, arg2));
13200#endif
13201#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13202    case TARGET_NR_unshare:
13203        return get_errno(unshare(arg1));
13204#endif
13205#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13206    case TARGET_NR_kcmp:
13207        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13208#endif
13209#ifdef TARGET_NR_swapcontext
13210    case TARGET_NR_swapcontext:
13211        /* PowerPC specific.  */
13212        return do_swapcontext(cpu_env, arg1, arg2, arg3);
13213#endif
13214#ifdef TARGET_NR_memfd_create
13215    case TARGET_NR_memfd_create:
13216        p = lock_user_string(arg1);
13217        if (!p) {
13218            return -TARGET_EFAULT;
13219        }
13220        ret = get_errno(memfd_create(p, arg2));
13221        fd_trans_unregister(ret);
13222        unlock_user(p, arg1, 0);
13223        return ret;
13224#endif
13225#if defined TARGET_NR_membarrier && defined __NR_membarrier
13226    case TARGET_NR_membarrier:
13227        return get_errno(membarrier(arg1, arg2));
13228#endif
13229
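         /*
          * copy_file_range mirrors the splice handling above: the optional
          * 64-bit offsets are read from guest memory, passed by pointer, and
          * written back only when the host call actually copied something.
          */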
13230#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13231    case TARGET_NR_copy_file_range:
13232        {
13233            loff_t inoff, outoff;
13234            loff_t *pinoff = NULL, *poutoff = NULL;
13235
13236            if (arg2) {
13237                if (get_user_u64(inoff, arg2)) {
13238                    return -TARGET_EFAULT;
13239                }
13240                pinoff = &inoff;
13241            }
13242            if (arg4) {
13243                if (get_user_u64(outoff, arg4)) {
13244                    return -TARGET_EFAULT;
13245                }
13246                poutoff = &outoff;
13247            }
13248            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13249                                                 arg5, arg6));
13250            if (!is_error(ret) && ret > 0) {
13251                if (arg2) {
13252                    if (put_user_u64(inoff, arg2)) {
13253                        return -TARGET_EFAULT;
13254                    }
13255                }
13256                if (arg4) {
13257                    if (put_user_u64(outoff, arg4)) {
13258                        return -TARGET_EFAULT;
13259                    }
13260                }
13261            }
13262        }
13263        return ret;
13264#endif
13265
13266    default:
13267        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13268        return -TARGET_ENOSYS;
13269    }
13270    return ret;
13271}
13272
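     /*
      * do_syscall() is the entry point called from the per-architecture cpu
      * loops: it wraps do_syscall1() with -strace logging (LOG_STRACE) and
      * the record_syscall_start()/record_syscall_return() hooks, and the
      * DEBUG_ERESTARTSYS block below can force every syscall to restart once
      * to exercise the restart paths.
      */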
13273abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13274                    abi_long arg2, abi_long arg3, abi_long arg4,
13275                    abi_long arg5, abi_long arg6, abi_long arg7,
13276                    abi_long arg8)
13277{
13278    CPUState *cpu = env_cpu(cpu_env);
13279    abi_long ret;
13280
13281#ifdef DEBUG_ERESTARTSYS
13282    /* Debug-only code for exercising the syscall-restart code paths
13283     * in the per-architecture cpu main loops: restart every syscall
13284     * the guest makes once before letting it through.
13285     */
13286    {
13287        static bool flag;
13288        flag = !flag;
13289        if (flag) {
13290            return -TARGET_ERESTARTSYS;
13291        }
13292    }
13293#endif
13294
13295    record_syscall_start(cpu, num, arg1,
13296                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13297
13298    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13299        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13300    }
13301
13302    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13303                      arg5, arg6, arg7, arg8);
13304
13305    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13306        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13307                          arg3, arg4, arg5, arg6);
13308    }
13309
13310    record_syscall_return(cpu, num, ret);
13311    return ret;
13312}
13313