qemu/linux-user/syscall.c
   1/*
   2 *  Linux syscalls
   3 *
   4 *  Copyright (c) 2003 Fabrice Bellard
   5 *
   6 *  This program is free software; you can redistribute it and/or modify
   7 *  it under the terms of the GNU General Public License as published by
   8 *  the Free Software Foundation; either version 2 of the License, or
   9 *  (at your option) any later version.
  10 *
  11 *  This program is distributed in the hope that it will be useful,
  12 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 *  GNU General Public License for more details.
  15 *
  16 *  You should have received a copy of the GNU General Public License
  17 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19#define _ATFILE_SOURCE
  20#include "qemu/osdep.h"
  21#include "qemu/cutils.h"
  22#include "qemu/path.h"
  23#include "qemu/memfd.h"
  24#include "qemu/queue.h"
  25#include <elf.h>
  26#include <endian.h>
  27#include <grp.h>
  28#include <sys/ipc.h>
  29#include <sys/msg.h>
  30#include <sys/wait.h>
  31#include <sys/mount.h>
  32#include <sys/file.h>
  33#include <sys/fsuid.h>
  34#include <sys/personality.h>
  35#include <sys/prctl.h>
  36#include <sys/resource.h>
  37#include <sys/swap.h>
  38#include <linux/capability.h>
  39#include <sched.h>
  40#include <sys/timex.h>
  41#include <sys/socket.h>
  42#include <linux/sockios.h>
  43#include <sys/un.h>
  44#include <sys/uio.h>
  45#include <poll.h>
  46#include <sys/times.h>
  47#include <sys/shm.h>
  48#include <sys/sem.h>
  49#include <sys/statfs.h>
  50#include <utime.h>
  51#include <sys/sysinfo.h>
  52#include <sys/signalfd.h>
  53//#include <sys/user.h>
  54#include <netinet/in.h>
  55#include <netinet/ip.h>
  56#include <netinet/tcp.h>
  57#include <netinet/udp.h>
  58#include <linux/wireless.h>
  59#include <linux/icmp.h>
  60#include <linux/icmpv6.h>
  61#include <linux/if_tun.h>
  62#include <linux/in6.h>
  63#include <linux/errqueue.h>
  64#include <linux/random.h>
  65#ifdef CONFIG_TIMERFD
  66#include <sys/timerfd.h>
  67#endif
  68#ifdef CONFIG_EVENTFD
  69#include <sys/eventfd.h>
  70#endif
  71#ifdef CONFIG_EPOLL
  72#include <sys/epoll.h>
  73#endif
  74#ifdef CONFIG_ATTR
  75#include "qemu/xattr.h"
  76#endif
  77#ifdef CONFIG_SENDFILE
  78#include <sys/sendfile.h>
  79#endif
  80#ifdef HAVE_SYS_KCOV_H
  81#include <sys/kcov.h>
  82#endif
  83
  84#define termios host_termios
  85#define winsize host_winsize
  86#define termio host_termio
  87#define sgttyb host_sgttyb /* same as target */
  88#define tchars host_tchars /* same as target */
  89#define ltchars host_ltchars /* same as target */
  90
  91#include <linux/termios.h>
  92#include <linux/unistd.h>
  93#include <linux/cdrom.h>
  94#include <linux/hdreg.h>
  95#include <linux/soundcard.h>
  96#include <linux/kd.h>
  97#include <linux/mtio.h>
  98
  99#ifdef HAVE_SYS_MOUNT_FSCONFIG
 100/*
 101 * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h,
 102 * which in turn prevents use of linux/fs.h. So we have to
 103 * define the constants ourselves for now.
 104 */
 105#define FS_IOC_GETFLAGS                _IOR('f', 1, long)
 106#define FS_IOC_SETFLAGS                _IOW('f', 2, long)
 107#define FS_IOC_GETVERSION              _IOR('v', 1, long)
 108#define FS_IOC_SETVERSION              _IOW('v', 2, long)
 109#define FS_IOC_FIEMAP                  _IOWR('f', 11, struct fiemap)
 110#define FS_IOC32_GETFLAGS              _IOR('f', 1, int)
 111#define FS_IOC32_SETFLAGS              _IOW('f', 2, int)
 112#define FS_IOC32_GETVERSION            _IOR('v', 1, int)
 113#define FS_IOC32_SETVERSION            _IOW('v', 2, int)
 114
 115#define BLKGETSIZE64 _IOR(0x12,114,size_t)
 116#define BLKDISCARD _IO(0x12,119)
 117#define BLKIOMIN _IO(0x12,120)
 118#define BLKIOOPT _IO(0x12,121)
 119#define BLKALIGNOFF _IO(0x12,122)
 120#define BLKPBSZGET _IO(0x12,123)
 121#define BLKDISCARDZEROES _IO(0x12,124)
 122#define BLKSECDISCARD _IO(0x12,125)
 123#define BLKROTATIONAL _IO(0x12,126)
 124#define BLKZEROOUT _IO(0x12,127)
 125
 126#define FIBMAP     _IO(0x00,1)
 127#define FIGETBSZ   _IO(0x00,2)
 128
 129struct file_clone_range {
 130        __s64 src_fd;
 131        __u64 src_offset;
 132        __u64 src_length;
 133        __u64 dest_offset;
 134};
 135
 136#define FICLONE         _IOW(0x94, 9, int)
 137#define FICLONERANGE    _IOW(0x94, 13, struct file_clone_range)
 138
 139#else
 140#include <linux/fs.h>
 141#endif
 142#include <linux/fd.h>
 143#if defined(CONFIG_FIEMAP)
 144#include <linux/fiemap.h>
 145#endif
 146#include <linux/fb.h>
 147#if defined(CONFIG_USBFS)
 148#include <linux/usbdevice_fs.h>
 149#include <linux/usb/ch9.h>
 150#endif
 151#include <linux/vt.h>
 152#include <linux/dm-ioctl.h>
 153#include <linux/reboot.h>
 154#include <linux/route.h>
 155#include <linux/filter.h>
 156#include <linux/blkpg.h>
 157#include <netpacket/packet.h>
 158#include <linux/netlink.h>
 159#include <linux/if_alg.h>
 160#include <linux/rtc.h>
 161#include <sound/asound.h>
 162#ifdef HAVE_BTRFS_H
 163#include <linux/btrfs.h>
 164#endif
 165#ifdef HAVE_DRM_H
 166#include <libdrm/drm.h>
 167#include <libdrm/i915_drm.h>
 168#endif
 169#include "linux_loop.h"
 170#include "uname.h"
 171
 172#include "qemu.h"
 173#include "user-internals.h"
 174#include "strace.h"
 175#include "signal-common.h"
 176#include "loader.h"
 177#include "user-mmap.h"
 178#include "user/safe-syscall.h"
 179#include "qemu/guest-random.h"
 180#include "qemu/selfmap.h"
 181#include "user/syscall-trace.h"
 182#include "special-errno.h"
 183#include "qapi/error.h"
 184#include "fd-trans.h"
 185#include "tcg/tcg.h"
 186#include "cpu_loop-common.h"
 187
 188#ifndef CLONE_IO
 189#define CLONE_IO                0x80000000      /* Clone io context */
 190#endif
 191
 192/* We can't directly call the host clone syscall, because this will
 193 * badly confuse libc (breaking mutexes, for example). So we must
 194 * divide clone flags into:
 195 *  * flag combinations that look like pthread_create()
 196 *  * flag combinations that look like fork()
 197 *  * flags we can implement within QEMU itself
 198 *  * flags we can't support and will return an error for
 199 */
 200/* For thread creation, all these flags must be present; for
 201 * fork, none must be present.
 202 */
 203#define CLONE_THREAD_FLAGS                              \
 204    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
 205     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
 206
 207/* These flags are ignored:
 208 * CLONE_DETACHED is now ignored by the kernel;
 209 * CLONE_IO is just an optimisation hint to the I/O scheduler
 210 */
 211#define CLONE_IGNORED_FLAGS                     \
 212    (CLONE_DETACHED | CLONE_IO)
 213
 214/* Flags for fork which we can implement within QEMU itself */
 215#define CLONE_OPTIONAL_FORK_FLAGS               \
 216    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
 217     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
 218
 219/* Flags for thread creation which we can implement within QEMU itself */
 220#define CLONE_OPTIONAL_THREAD_FLAGS                             \
 221    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
 222     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
 223
 224#define CLONE_INVALID_FORK_FLAGS                                        \
 225    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
 226
 227#define CLONE_INVALID_THREAD_FLAGS                                      \
 228    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
 229       CLONE_IGNORED_FLAGS))
 230
 231/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 232 * have almost all been allocated. We cannot support any of
 233 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 234 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 235 * The checks against the invalid thread masks above will catch these.
 236 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 237 */
 238
 239/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 240 * once. This exercises the codepaths for restart.
 241 */
 242//#define DEBUG_ERESTARTSYS
 243
 244//#include <linux/msdos_fs.h>
 245#define VFAT_IOCTL_READDIR_BOTH \
 246    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
 247#define VFAT_IOCTL_READDIR_SHORT \
 248    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
 249
 250#undef _syscall0
 251#undef _syscall1
 252#undef _syscall2
 253#undef _syscall3
 254#undef _syscall4
 255#undef _syscall5
 256#undef _syscall6
 257
 258#define _syscall0(type,name)            \
 259static type name (void)                 \
 260{                                       \
 261        return syscall(__NR_##name);    \
 262}
 263
 264#define _syscall1(type,name,type1,arg1)         \
 265static type name (type1 arg1)                   \
 266{                                               \
 267        return syscall(__NR_##name, arg1);      \
 268}
 269
 270#define _syscall2(type,name,type1,arg1,type2,arg2)      \
 271static type name (type1 arg1,type2 arg2)                \
 272{                                                       \
 273        return syscall(__NR_##name, arg1, arg2);        \
 274}
 275
 276#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
 277static type name (type1 arg1,type2 arg2,type3 arg3)             \
 278{                                                               \
 279        return syscall(__NR_##name, arg1, arg2, arg3);          \
 280}
 281
 282#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)        \
 283static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)                  \
 284{                                                                               \
 285        return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
 286}
 287
 288#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,        \
 289                  type5,arg5)                                                   \
 290static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)       \
 291{                                                                               \
 292        return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
 293}
 294
 295
 296#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,        \
 297                  type5,arg5,type6,arg6)                                        \
 298static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,       \
 299                  type6 arg6)                                                   \
 300{                                                                               \
 301        return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
 302}
 303
 304
 305#define __NR_sys_uname __NR_uname
 306#define __NR_sys_getcwd1 __NR_getcwd
 307#define __NR_sys_getdents __NR_getdents
 308#define __NR_sys_getdents64 __NR_getdents64
 309#define __NR_sys_getpriority __NR_getpriority
 310#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
 311#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
 312#define __NR_sys_syslog __NR_syslog
 313#if defined(__NR_futex)
 314# define __NR_sys_futex __NR_futex
 315#endif
 316#if defined(__NR_futex_time64)
 317# define __NR_sys_futex_time64 __NR_futex_time64
 318#endif
 319#define __NR_sys_statx __NR_statx
 320
 321#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
 322#define __NR__llseek __NR_lseek
 323#endif
 324
 325/* Newer kernel ports have llseek() instead of _llseek() */
 326#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
 327#define TARGET_NR__llseek TARGET_NR_llseek
 328#endif
 329
 330/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
 331#ifndef TARGET_O_NONBLOCK_MASK
 332#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
 333#endif
 334
 335#define __NR_sys_gettid __NR_gettid
 336_syscall0(int, sys_gettid)
 337
 338/* For the 64-bit guest on 32-bit host case we must emulate
 339 * getdents using getdents64, because otherwise the host
 340 * might hand us back more dirent records than we can fit
 341 * into the guest buffer after structure format conversion.
  342 * Otherwise we emulate guest getdents using the host's own getdents, if it has one.
 343 */
 344#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
 345#define EMULATE_GETDENTS_WITH_GETDENTS
 346#endif
 347
 348#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
 349_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
 350#endif
 351#if (defined(TARGET_NR_getdents) && \
 352      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
 353    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
 354_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
 355#endif
 356#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
 357_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
 358          loff_t *, res, uint, wh);
 359#endif
 360_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
 361_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
 362          siginfo_t *, uinfo)
 363_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
 364#ifdef __NR_exit_group
 365_syscall1(int,exit_group,int,error_code)
 366#endif
 367#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
 368#define __NR_sys_close_range __NR_close_range
 369_syscall3(int,sys_close_range,int,first,int,last,int,flags)
 370#ifndef CLOSE_RANGE_CLOEXEC
 371#define CLOSE_RANGE_CLOEXEC     (1U << 2)
 372#endif
 373#endif
 374#if defined(__NR_futex)
 375_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
 376          const struct timespec *,timeout,int *,uaddr2,int,val3)
 377#endif
 378#if defined(__NR_futex_time64)
 379_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
 380          const struct timespec *,timeout,int *,uaddr2,int,val3)
 381#endif
 382#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
 383_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
 384#endif
 385#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
 386_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
 387                             unsigned int, flags);
 388#endif
 389#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
 390_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
 391#endif
 392#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
 393_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
 394          unsigned long *, user_mask_ptr);
 395#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
 396_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
 397          unsigned long *, user_mask_ptr);
 398/* sched_attr is not defined in glibc */
 399struct sched_attr {
 400    uint32_t size;
 401    uint32_t sched_policy;
 402    uint64_t sched_flags;
 403    int32_t sched_nice;
 404    uint32_t sched_priority;
 405    uint64_t sched_runtime;
 406    uint64_t sched_deadline;
 407    uint64_t sched_period;
 408    uint32_t sched_util_min;
 409    uint32_t sched_util_max;
 410};
 411#define __NR_sys_sched_getattr __NR_sched_getattr
 412_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
 413          unsigned int, size, unsigned int, flags);
 414#define __NR_sys_sched_setattr __NR_sched_setattr
 415_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
 416          unsigned int, flags);
 417#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
 418_syscall1(int, sys_sched_getscheduler, pid_t, pid);
 419#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
 420_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
 421          const struct sched_param *, param);
 422#define __NR_sys_sched_getparam __NR_sched_getparam
 423_syscall2(int, sys_sched_getparam, pid_t, pid,
 424          struct sched_param *, param);
 425#define __NR_sys_sched_setparam __NR_sched_setparam
 426_syscall2(int, sys_sched_setparam, pid_t, pid,
 427          const struct sched_param *, param);
 428#define __NR_sys_getcpu __NR_getcpu
 429_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
 430_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
 431          void *, arg);
 432_syscall2(int, capget, struct __user_cap_header_struct *, header,
 433          struct __user_cap_data_struct *, data);
 434_syscall2(int, capset, struct __user_cap_header_struct *, header,
 435          struct __user_cap_data_struct *, data);
 436#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
 437_syscall2(int, ioprio_get, int, which, int, who)
 438#endif
 439#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
 440_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
 441#endif
 442#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
 443_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
 444#endif
 445
 446#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
 447_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
 448          unsigned long, idx1, unsigned long, idx2)
 449#endif
 450
 451/*
 452 * It is assumed that struct statx is architecture independent.
 453 */
 454#if defined(TARGET_NR_statx) && defined(__NR_statx)
 455_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
 456          unsigned int, mask, struct target_statx *, statxbuf)
 457#endif
 458#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
 459_syscall2(int, membarrier, int, cmd, int, flags)
 460#endif
 461
 462static const bitmask_transtbl fcntl_flags_tbl[] = {
 463  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
 464  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
 465  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
 466  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
 467  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
 468  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
 469  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
 470  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
 471  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
 472  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
 473  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
 474  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
 475  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
 476#if defined(O_DIRECT)
 477  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
 478#endif
 479#if defined(O_NOATIME)
 480  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
 481#endif
 482#if defined(O_CLOEXEC)
 483  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
 484#endif
 485#if defined(O_PATH)
 486  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
 487#endif
 488#if defined(O_TMPFILE)
 489  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
 490#endif
 491  /* Don't terminate the list prematurely on 64-bit host+guest.  */
 492#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
 493  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
 494#endif
 495  { 0, 0, 0, 0 }
 496};
 497
 498_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 499
 500#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
 501#if defined(__NR_utimensat)
 502#define __NR_sys_utimensat __NR_utimensat
 503_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
 504          const struct timespec *,tsp,int,flags)
 505#else
 506static int sys_utimensat(int dirfd, const char *pathname,
 507                         const struct timespec times[2], int flags)
 508{
 509    errno = ENOSYS;
 510    return -1;
 511}
 512#endif
 513#endif /* TARGET_NR_utimensat */
 514
 515#ifdef TARGET_NR_renameat2
 516#if defined(__NR_renameat2)
 517#define __NR_sys_renameat2 __NR_renameat2
 518_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
 519          const char *, new, unsigned int, flags)
 520#else
 521static int sys_renameat2(int oldfd, const char *old,
 522                         int newfd, const char *new, int flags)
 523{
 524    if (flags == 0) {
 525        return renameat(oldfd, old, newfd, new);
 526    }
 527    errno = ENOSYS;
 528    return -1;
 529}
 530#endif
 531#endif /* TARGET_NR_renameat2 */
 532
 533#ifdef CONFIG_INOTIFY
 534#include <sys/inotify.h>
 535#else
 536/* Userspace can usually survive runtime without inotify */
 537#undef TARGET_NR_inotify_init
 538#undef TARGET_NR_inotify_init1
 539#undef TARGET_NR_inotify_add_watch
 540#undef TARGET_NR_inotify_rm_watch
 541#endif /* CONFIG_INOTIFY  */
 542
 543#if defined(TARGET_NR_prlimit64)
 544#ifndef __NR_prlimit64
 545# define __NR_prlimit64 -1
 546#endif
 547#define __NR_sys_prlimit64 __NR_prlimit64
  548/* The glibc rlimit structure may not match the one used by the underlying syscall */
 549struct host_rlimit64 {
 550    uint64_t rlim_cur;
 551    uint64_t rlim_max;
 552};
 553_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
 554          const struct host_rlimit64 *, new_limit,
 555          struct host_rlimit64 *, old_limit)
 556#endif
 557
 558
 559#if defined(TARGET_NR_timer_create)
 560/* Maximum of 32 active POSIX timers allowed at any one time. */
 561#define GUEST_TIMER_MAX 32
 562static timer_t g_posix_timers[GUEST_TIMER_MAX];
 563static int g_posix_timer_allocated[GUEST_TIMER_MAX];
 564
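/*
 * Claim the first free guest timer slot, marking it allocated with an
 * atomic exchange so concurrent threads cannot grab the same slot.
 * Returns the slot index, or -1 if all GUEST_TIMER_MAX slots are in use.
 */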
 565static inline int next_free_host_timer(void)
 566{
 567    int k;
 568    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
 569        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
 570            return k;
 571        }
 572    }
 573    return -1;
 574}
 575
 576static inline void free_host_timer_slot(int id)
 577{
 578    qatomic_store_release(g_posix_timer_allocated + id, 0);
 579}
 580#endif
 581
 582static inline int host_to_target_errno(int host_errno)
 583{
 584    switch (host_errno) {
 585#define E(X)  case X: return TARGET_##X;
 586#include "errnos.c.inc"
 587#undef E
 588    default:
 589        return host_errno;
 590    }
 591}
 592
 593static inline int target_to_host_errno(int target_errno)
 594{
 595    switch (target_errno) {
 596#define E(X)  case TARGET_##X: return X;
 597#include "errnos.c.inc"
 598#undef E
 599    default:
 600        return target_errno;
 601    }
 602}
 603
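/*
 * Convert a host syscall result: a host failure (-1) becomes the negated
 * target errno, any other value is passed through unchanged.
 */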
 604abi_long get_errno(abi_long ret)
 605{
 606    if (ret == -1)
 607        return -host_to_target_errno(errno);
 608    else
 609        return ret;
 610}
 611
 612const char *target_strerror(int err)
 613{
 614    if (err == QEMU_ERESTARTSYS) {
 615        return "To be restarted";
 616    }
 617    if (err == QEMU_ESIGRETURN) {
 618        return "Successful exit from sigreturn";
 619    }
 620
 621    return strerror(target_to_host_errno(err));
 622}
 623
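/*
 * Verify that the guest memory between ksize and usize is all zeroes
 * (used when the guest passes a larger structure than QEMU knows about).
 * Returns 1 if the tail is zero (or usize <= ksize), 0 if a non-zero byte
 * is found, and -TARGET_EFAULT if the guest buffer cannot be read.
 */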
 624static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
 625{
 626    int i;
 627    uint8_t b;
 628    if (usize <= ksize) {
 629        return 1;
 630    }
 631    for (i = ksize; i < usize; i++) {
 632        if (get_user_u8(b, addr + i)) {
 633            return -TARGET_EFAULT;
 634        }
 635        if (b != 0) {
 636            return 0;
 637        }
 638    }
 639    return 1;
 640}
 641
 642#define safe_syscall0(type, name) \
 643static type safe_##name(void) \
 644{ \
 645    return safe_syscall(__NR_##name); \
 646}
 647
 648#define safe_syscall1(type, name, type1, arg1) \
 649static type safe_##name(type1 arg1) \
 650{ \
 651    return safe_syscall(__NR_##name, arg1); \
 652}
 653
 654#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
 655static type safe_##name(type1 arg1, type2 arg2) \
 656{ \
 657    return safe_syscall(__NR_##name, arg1, arg2); \
 658}
 659
 660#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
 661static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
 662{ \
 663    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
 664}
 665
 666#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
 667    type4, arg4) \
 668static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
 669{ \
 670    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
 671}
 672
 673#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
 674    type4, arg4, type5, arg5) \
 675static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
 676    type5 arg5) \
 677{ \
 678    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
 679}
 680
 681#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
 682    type4, arg4, type5, arg5, type6, arg6) \
 683static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
 684    type5 arg5, type6 arg6) \
 685{ \
 686    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
 687}
 688
 689safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 690safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
 691safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
 692              int, flags, mode_t, mode)
 693#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
 694safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
 695              struct rusage *, rusage)
 696#endif
 697safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
 698              int, options, struct rusage *, rusage)
 699safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
 700#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
 701    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
 702safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
 703              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
 704#endif
 705#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
 706safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
 707              struct timespec *, tsp, const sigset_t *, sigmask,
 708              size_t, sigsetsize)
 709#endif
 710safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
 711              int, maxevents, int, timeout, const sigset_t *, sigmask,
 712              size_t, sigsetsize)
 713#if defined(__NR_futex)
 714safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
 715              const struct timespec *,timeout,int *,uaddr2,int,val3)
 716#endif
 717#if defined(__NR_futex_time64)
 718safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
 719              const struct timespec *,timeout,int *,uaddr2,int,val3)
 720#endif
 721safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
 722safe_syscall2(int, kill, pid_t, pid, int, sig)
 723safe_syscall2(int, tkill, int, tid, int, sig)
 724safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
 725safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
 726safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
 727safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
 728              unsigned long, pos_l, unsigned long, pos_h)
 729safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
 730              unsigned long, pos_l, unsigned long, pos_h)
 731safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
 732              socklen_t, addrlen)
 733safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
 734              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
 735safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
 736              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
 737safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
 738safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
 739safe_syscall2(int, flock, int, fd, int, operation)
 740#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
 741safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
 742              const struct timespec *, uts, size_t, sigsetsize)
 743#endif
 744safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
 745              int, flags)
 746#if defined(TARGET_NR_nanosleep)
 747safe_syscall2(int, nanosleep, const struct timespec *, req,
 748              struct timespec *, rem)
 749#endif
 750#if defined(TARGET_NR_clock_nanosleep) || \
 751    defined(TARGET_NR_clock_nanosleep_time64)
 752safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
 753              const struct timespec *, req, struct timespec *, rem)
 754#endif
 755#ifdef __NR_ipc
 756#ifdef __s390x__
 757safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
 758              void *, ptr)
 759#else
 760safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
 761              void *, ptr, long, fifth)
 762#endif
 763#endif
 764#ifdef __NR_msgsnd
 765safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
 766              int, flags)
 767#endif
 768#ifdef __NR_msgrcv
 769safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
 770              long, msgtype, int, flags)
 771#endif
 772#ifdef __NR_semtimedop
 773safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
 774              unsigned, nsops, const struct timespec *, timeout)
 775#endif
 776#if defined(TARGET_NR_mq_timedsend) || \
 777    defined(TARGET_NR_mq_timedsend_time64)
 778safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
 779              size_t, len, unsigned, prio, const struct timespec *, timeout)
 780#endif
 781#if defined(TARGET_NR_mq_timedreceive) || \
 782    defined(TARGET_NR_mq_timedreceive_time64)
 783safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
 784              size_t, len, unsigned *, prio, const struct timespec *, timeout)
 785#endif
 786#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
 787safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
 788              int, outfd, loff_t *, poutoff, size_t, length,
 789              unsigned int, flags)
 790#endif
 791
 792/* We do ioctl like this rather than via safe_syscall3 to preserve the
 793 * "third argument might be integer or pointer or not present" behaviour of
 794 * the libc function.
 795 */
 796#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
 797/* Similarly for fcntl. Note that callers must always:
 798 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 799 *  use the flock64 struct rather than unsuffixed flock
 800 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 801 */
 802#ifdef __NR_fcntl64
 803#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
 804#else
 805#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
 806#endif
 807
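/*
 * Translate a host socket type value, including the SOCK_CLOEXEC and
 * SOCK_NONBLOCK modifier bits, into the target's encoding.
 */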
 808static inline int host_to_target_sock_type(int host_type)
 809{
 810    int target_type;
 811
 812    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
 813    case SOCK_DGRAM:
 814        target_type = TARGET_SOCK_DGRAM;
 815        break;
 816    case SOCK_STREAM:
 817        target_type = TARGET_SOCK_STREAM;
 818        break;
 819    default:
 820        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
 821        break;
 822    }
 823
 824#if defined(SOCK_CLOEXEC)
 825    if (host_type & SOCK_CLOEXEC) {
 826        target_type |= TARGET_SOCK_CLOEXEC;
 827    }
 828#endif
 829
 830#if defined(SOCK_NONBLOCK)
 831    if (host_type & SOCK_NONBLOCK) {
 832        target_type |= TARGET_SOCK_NONBLOCK;
 833    }
 834#endif
 835
 836    return target_type;
 837}
 838
 839static abi_ulong target_brk;
 840static abi_ulong target_original_brk;
 841static abi_ulong brk_page;
 842
 843void target_set_brk(abi_ulong new_brk)
 844{
 845    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
 846    brk_page = HOST_PAGE_ALIGN(target_brk);
 847}
 848
 849//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
 850#define DEBUGF_BRK(message, args...)
 851
 852/* do_brk() must return target values and target errnos. */
 853abi_long do_brk(abi_ulong new_brk)
 854{
 855    abi_long mapped_addr;
 856    abi_ulong new_alloc_size;
 857
 858    /* brk pointers are always untagged */
 859
 860    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
 861
 862    if (!new_brk) {
 863        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
 864        return target_brk;
 865    }
 866    if (new_brk < target_original_brk) {
 867        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
 868                   target_brk);
 869        return target_brk;
 870    }
 871
 872    /* If the new brk is less than the highest page reserved to the
 873     * target heap allocation, set it and we're almost done...  */
 874    if (new_brk <= brk_page) {
 875        /* Heap contents are initialized to zero, as for anonymous
 876         * mapped pages.  */
 877        if (new_brk > target_brk) {
 878            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
 879        }
 880        target_brk = new_brk;
 881        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
 882        return target_brk;
 883    }
 884
 885    /* We need to allocate more memory after the brk... Note that
 886     * we don't use MAP_FIXED because that will map over the top of
 887     * any existing mapping (like the one with the host libc or qemu
 888     * itself); instead we treat "mapped but at wrong address" as
 889     * a failure and unmap again.
 890     */
 891    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
 892    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
 893                                        PROT_READ|PROT_WRITE,
 894                                        MAP_ANON|MAP_PRIVATE, 0, 0));
 895
 896    if (mapped_addr == brk_page) {
 897        /* Heap contents are initialized to zero, as for anonymous
 898         * mapped pages.  Technically the new pages are already
 899         * initialized to zero since they *are* anonymous mapped
 900         * pages, however we have to take care with the contents that
  901         * come from the remaining part of the previous page: it may
  902         * contain garbage data due to a previous heap usage (grown
  903         * then shrunk).  */
 904        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
 905
 906        target_brk = new_brk;
 907        brk_page = HOST_PAGE_ALIGN(target_brk);
 908        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
 909            target_brk);
 910        return target_brk;
 911    } else if (mapped_addr != -1) {
 912        /* Mapped but at wrong address, meaning there wasn't actually
 913         * enough space for this brk.
 914         */
 915        target_munmap(mapped_addr, new_alloc_size);
 916        mapped_addr = -1;
 917        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
 918    }
 919    else {
 920        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
 921    }
 922
 923#if defined(TARGET_ALPHA)
 924    /* We (partially) emulate OSF/1 on Alpha, which requires we
 925       return a proper errno, not an unchanged brk value.  */
 926    return -TARGET_ENOMEM;
 927#endif
 928    /* For everything else, return the previous break. */
 929    return target_brk;
 930}
 931
 932#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
 933    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
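/*
 * Expand a guest fd_set, stored as an array of abi_ulong bit words, into
 * a host fd_set, converting word size and byte order as needed.
 */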
 934static inline abi_long copy_from_user_fdset(fd_set *fds,
 935                                            abi_ulong target_fds_addr,
 936                                            int n)
 937{
 938    int i, nw, j, k;
 939    abi_ulong b, *target_fds;
 940
 941    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
 942    if (!(target_fds = lock_user(VERIFY_READ,
 943                                 target_fds_addr,
 944                                 sizeof(abi_ulong) * nw,
 945                                 1)))
 946        return -TARGET_EFAULT;
 947
 948    FD_ZERO(fds);
 949    k = 0;
 950    for (i = 0; i < nw; i++) {
 951        /* grab the abi_ulong */
 952        __get_user(b, &target_fds[i]);
 953        for (j = 0; j < TARGET_ABI_BITS; j++) {
 954            /* check the bit inside the abi_ulong */
 955            if ((b >> j) & 1)
 956                FD_SET(k, fds);
 957            k++;
 958        }
 959    }
 960
 961    unlock_user(target_fds, target_fds_addr, 0);
 962
 963    return 0;
 964}
 965
 966static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
 967                                                 abi_ulong target_fds_addr,
 968                                                 int n)
 969{
 970    if (target_fds_addr) {
 971        if (copy_from_user_fdset(fds, target_fds_addr, n))
 972            return -TARGET_EFAULT;
 973        *fds_ptr = fds;
 974    } else {
 975        *fds_ptr = NULL;
 976    }
 977    return 0;
 978}
 979
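/*
 * Pack a host fd_set back into the guest's abi_ulong bitmap layout and
 * write it out to guest memory.
 */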
 980static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
 981                                          const fd_set *fds,
 982                                          int n)
 983{
 984    int i, nw, j, k;
 985    abi_long v;
 986    abi_ulong *target_fds;
 987
 988    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
 989    if (!(target_fds = lock_user(VERIFY_WRITE,
 990                                 target_fds_addr,
 991                                 sizeof(abi_ulong) * nw,
 992                                 0)))
 993        return -TARGET_EFAULT;
 994
 995    k = 0;
 996    for (i = 0; i < nw; i++) {
 997        v = 0;
 998        for (j = 0; j < TARGET_ABI_BITS; j++) {
 999            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1000            k++;
1001        }
1002        __put_user(v, &target_fds[i]);
1003    }
1004
1005    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1006
1007    return 0;
1008}
1009#endif
1010
1011#if defined(__alpha__)
1012#define HOST_HZ 1024
1013#else
1014#define HOST_HZ 100
1015#endif
1016
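/* Rescale a clock tick count from the host's HZ to the target's HZ. */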
1017static inline abi_long host_to_target_clock_t(long ticks)
1018{
1019#if HOST_HZ == TARGET_HZ
1020    return ticks;
1021#else
1022    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1023#endif
1024}
1025
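/*
 * Copy a host struct rusage out to guest memory, byte-swapping every
 * field to the target's representation.
 */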
1026static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1027                                             const struct rusage *rusage)
1028{
1029    struct target_rusage *target_rusage;
1030
1031    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1032        return -TARGET_EFAULT;
1033    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1034    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1035    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1036    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1037    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1038    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1039    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1040    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1041    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1042    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1043    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1044    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1045    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1046    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1047    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1048    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1049    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1050    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1051    unlock_user_struct(target_rusage, target_addr, 1);
1052
1053    return 0;
1054}
1055
1056#ifdef TARGET_NR_setrlimit
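/*
 * Convert a guest rlimit value to a host rlim_t; TARGET_RLIM_INFINITY and
 * any value that does not fit in rlim_t are mapped to RLIM_INFINITY.
 */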
1057static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1058{
1059    abi_ulong target_rlim_swap;
1060    rlim_t result;
1061    
1062    target_rlim_swap = tswapal(target_rlim);
1063    if (target_rlim_swap == TARGET_RLIM_INFINITY)
1064        return RLIM_INFINITY;
1065
1066    result = target_rlim_swap;
1067    if (target_rlim_swap != (rlim_t)result)
1068        return RLIM_INFINITY;
1069    
1070    return result;
1071}
1072#endif
1073
1074#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1075static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1076{
1077    abi_ulong target_rlim_swap;
1078    abi_ulong result;
1079    
1080    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1081        target_rlim_swap = TARGET_RLIM_INFINITY;
1082    else
1083        target_rlim_swap = rlim;
1084    result = tswapal(target_rlim_swap);
1085    
1086    return result;
1087}
1088#endif
1089
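/*
 * Map a target RLIMIT_* resource code to the host constant; unknown codes
 * are passed through unchanged.
 */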
1090static inline int target_to_host_resource(int code)
1091{
1092    switch (code) {
1093    case TARGET_RLIMIT_AS:
1094        return RLIMIT_AS;
1095    case TARGET_RLIMIT_CORE:
1096        return RLIMIT_CORE;
1097    case TARGET_RLIMIT_CPU:
1098        return RLIMIT_CPU;
1099    case TARGET_RLIMIT_DATA:
1100        return RLIMIT_DATA;
1101    case TARGET_RLIMIT_FSIZE:
1102        return RLIMIT_FSIZE;
1103    case TARGET_RLIMIT_LOCKS:
1104        return RLIMIT_LOCKS;
1105    case TARGET_RLIMIT_MEMLOCK:
1106        return RLIMIT_MEMLOCK;
1107    case TARGET_RLIMIT_MSGQUEUE:
1108        return RLIMIT_MSGQUEUE;
1109    case TARGET_RLIMIT_NICE:
1110        return RLIMIT_NICE;
1111    case TARGET_RLIMIT_NOFILE:
1112        return RLIMIT_NOFILE;
1113    case TARGET_RLIMIT_NPROC:
1114        return RLIMIT_NPROC;
1115    case TARGET_RLIMIT_RSS:
1116        return RLIMIT_RSS;
1117    case TARGET_RLIMIT_RTPRIO:
1118        return RLIMIT_RTPRIO;
1119#ifdef RLIMIT_RTTIME
1120    case TARGET_RLIMIT_RTTIME:
1121        return RLIMIT_RTTIME;
1122#endif
1123    case TARGET_RLIMIT_SIGPENDING:
1124        return RLIMIT_SIGPENDING;
1125    case TARGET_RLIMIT_STACK:
1126        return RLIMIT_STACK;
1127    default:
1128        return code;
1129    }
1130}
1131
1132static inline abi_long copy_from_user_timeval(struct timeval *tv,
1133                                              abi_ulong target_tv_addr)
1134{
1135    struct target_timeval *target_tv;
1136
1137    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1138        return -TARGET_EFAULT;
1139    }
1140
1141    __get_user(tv->tv_sec, &target_tv->tv_sec);
1142    __get_user(tv->tv_usec, &target_tv->tv_usec);
1143
1144    unlock_user_struct(target_tv, target_tv_addr, 0);
1145
1146    return 0;
1147}
1148
1149static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1150                                            const struct timeval *tv)
1151{
1152    struct target_timeval *target_tv;
1153
1154    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1155        return -TARGET_EFAULT;
1156    }
1157
1158    __put_user(tv->tv_sec, &target_tv->tv_sec);
1159    __put_user(tv->tv_usec, &target_tv->tv_usec);
1160
1161    unlock_user_struct(target_tv, target_tv_addr, 1);
1162
1163    return 0;
1164}
1165
1166#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1167static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1168                                                abi_ulong target_tv_addr)
1169{
1170    struct target__kernel_sock_timeval *target_tv;
1171
1172    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1173        return -TARGET_EFAULT;
1174    }
1175
1176    __get_user(tv->tv_sec, &target_tv->tv_sec);
1177    __get_user(tv->tv_usec, &target_tv->tv_usec);
1178
1179    unlock_user_struct(target_tv, target_tv_addr, 0);
1180
1181    return 0;
1182}
1183#endif
1184
1185static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1186                                              const struct timeval *tv)
1187{
1188    struct target__kernel_sock_timeval *target_tv;
1189
1190    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1191        return -TARGET_EFAULT;
1192    }
1193
1194    __put_user(tv->tv_sec, &target_tv->tv_sec);
1195    __put_user(tv->tv_usec, &target_tv->tv_usec);
1196
1197    unlock_user_struct(target_tv, target_tv_addr, 1);
1198
1199    return 0;
1200}
1201
1202#if defined(TARGET_NR_futex) || \
1203    defined(TARGET_NR_rt_sigtimedwait) || \
 1204    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1205    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1206    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1207    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1208    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1209    defined(TARGET_NR_timer_settime) || \
1210    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1211static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1212                                               abi_ulong target_addr)
1213{
1214    struct target_timespec *target_ts;
1215
1216    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1217        return -TARGET_EFAULT;
1218    }
1219    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1220    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1221    unlock_user_struct(target_ts, target_addr, 0);
1222    return 0;
1223}
1224#endif
1225
1226#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1227    defined(TARGET_NR_timer_settime64) || \
1228    defined(TARGET_NR_mq_timedsend_time64) || \
1229    defined(TARGET_NR_mq_timedreceive_time64) || \
1230    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1231    defined(TARGET_NR_clock_nanosleep_time64) || \
1232    defined(TARGET_NR_rt_sigtimedwait_time64) || \
1233    defined(TARGET_NR_utimensat) || \
1234    defined(TARGET_NR_utimensat_time64) || \
1235    defined(TARGET_NR_semtimedop_time64) || \
1236    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1237static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1238                                                 abi_ulong target_addr)
1239{
1240    struct target__kernel_timespec *target_ts;
1241
1242    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1243        return -TARGET_EFAULT;
1244    }
1245    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1246    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1247    /* in 32bit mode, this drops the padding */
1248    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1249    unlock_user_struct(target_ts, target_addr, 0);
1250    return 0;
1251}
1252#endif
1253
1254static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1255                                               struct timespec *host_ts)
1256{
1257    struct target_timespec *target_ts;
1258
1259    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1260        return -TARGET_EFAULT;
1261    }
1262    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1263    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1264    unlock_user_struct(target_ts, target_addr, 1);
1265    return 0;
1266}
1267
1268static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1269                                                 struct timespec *host_ts)
1270{
1271    struct target__kernel_timespec *target_ts;
1272
1273    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1274        return -TARGET_EFAULT;
1275    }
1276    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1277    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1278    unlock_user_struct(target_ts, target_addr, 1);
1279    return 0;
1280}
1281
1282#if defined(TARGET_NR_gettimeofday)
1283static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1284                                             struct timezone *tz)
1285{
1286    struct target_timezone *target_tz;
1287
1288    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1289        return -TARGET_EFAULT;
1290    }
1291
1292    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1293    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1294
1295    unlock_user_struct(target_tz, target_tz_addr, 1);
1296
1297    return 0;
1298}
1299#endif
1300
1301#if defined(TARGET_NR_settimeofday)
1302static inline abi_long copy_from_user_timezone(struct timezone *tz,
1303                                               abi_ulong target_tz_addr)
1304{
1305    struct target_timezone *target_tz;
1306
1307    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1308        return -TARGET_EFAULT;
1309    }
1310
1311    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1312    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1313
1314    unlock_user_struct(target_tz, target_tz_addr, 0);
1315
1316    return 0;
1317}
1318#endif
1319
1320#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1321#include <mqueue.h>
1322
1323static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1324                                              abi_ulong target_mq_attr_addr)
1325{
1326    struct target_mq_attr *target_mq_attr;
1327
1328    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1329                          target_mq_attr_addr, 1))
1330        return -TARGET_EFAULT;
1331
1332    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1333    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1334    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1335    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1336
1337    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1338
1339    return 0;
1340}
1341
1342static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1343                                            const struct mq_attr *attr)
1344{
1345    struct target_mq_attr *target_mq_attr;
1346
1347    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1348                          target_mq_attr_addr, 0))
1349        return -TARGET_EFAULT;
1350
1351    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1352    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1353    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1354    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1355
1356    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1357
1358    return 0;
1359}
1360#endif
1361
1362#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1363/* do_select() must return target values and target errnos. */
1364static abi_long do_select(int n,
1365                          abi_ulong rfd_addr, abi_ulong wfd_addr,
1366                          abi_ulong efd_addr, abi_ulong target_tv_addr)
1367{
1368    fd_set rfds, wfds, efds;
1369    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1370    struct timeval tv;
1371    struct timespec ts, *ts_ptr;
1372    abi_long ret;
1373
1374    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1375    if (ret) {
1376        return ret;
1377    }
1378    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1379    if (ret) {
1380        return ret;
1381    }
1382    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1383    if (ret) {
1384        return ret;
1385    }
1386
1387    if (target_tv_addr) {
1388        if (copy_from_user_timeval(&tv, target_tv_addr))
1389            return -TARGET_EFAULT;
1390        ts.tv_sec = tv.tv_sec;
1391        ts.tv_nsec = tv.tv_usec * 1000;
1392        ts_ptr = &ts;
1393    } else {
1394        ts_ptr = NULL;
1395    }
1396
1397    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1398                                  ts_ptr, NULL));
1399
1400    if (!is_error(ret)) {
1401        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1402            return -TARGET_EFAULT;
1403        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1404            return -TARGET_EFAULT;
1405        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1406            return -TARGET_EFAULT;
1407
1408        if (target_tv_addr) {
1409            tv.tv_sec = ts.tv_sec;
1410            tv.tv_usec = ts.tv_nsec / 1000;
1411            if (copy_to_user_timeval(target_tv_addr, &tv)) {
1412                return -TARGET_EFAULT;
1413            }
1414        }
1415    }
1416
1417    return ret;
1418}
1419
1420#if defined(TARGET_WANT_OLD_SYS_SELECT)
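/*
 * Handle the old single-argument form of select(): the guest passes a
 * pointer to a block holding n, the three fd_set pointers and the timeval,
 * which we unpack and forward to do_select().
 */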
1421static abi_long do_old_select(abi_ulong arg1)
1422{
1423    struct target_sel_arg_struct *sel;
1424    abi_ulong inp, outp, exp, tvp;
1425    long nsel;
1426
1427    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1428        return -TARGET_EFAULT;
1429    }
1430
1431    nsel = tswapal(sel->n);
1432    inp = tswapal(sel->inp);
1433    outp = tswapal(sel->outp);
1434    exp = tswapal(sel->exp);
1435    tvp = tswapal(sel->tvp);
1436
1437    unlock_user_struct(sel, arg1, 0);
1438
1439    return do_select(nsel, inp, outp, exp, tvp);
1440}
1441#endif
1442#endif
1443
1444#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1445static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1446                            abi_long arg4, abi_long arg5, abi_long arg6,
1447                            bool time64)
1448{
1449    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1450    fd_set rfds, wfds, efds;
1451    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1452    struct timespec ts, *ts_ptr;
1453    abi_long ret;
1454
1455    /*
1456     * The 6th arg is actually two args smashed together,
1457     * so we cannot use the C library.
1458     */
1459    struct {
1460        sigset_t *set;
1461        size_t size;
1462    } sig, *sig_ptr;
1463
1464    abi_ulong arg_sigset, arg_sigsize, *arg7;
1465
1466    n = arg1;
1467    rfd_addr = arg2;
1468    wfd_addr = arg3;
1469    efd_addr = arg4;
1470    ts_addr = arg5;
1471
1472    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1473    if (ret) {
1474        return ret;
1475    }
1476    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1477    if (ret) {
1478        return ret;
1479    }
1480    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1481    if (ret) {
1482        return ret;
1483    }
1484
1485    /*
1486     * This takes a timespec, and not a timeval, so we cannot
1487     * use the do_select() helper ...
1488     */
1489    if (ts_addr) {
1490        if (time64) {
1491            if (target_to_host_timespec64(&ts, ts_addr)) {
1492                return -TARGET_EFAULT;
1493            }
1494        } else {
1495            if (target_to_host_timespec(&ts, ts_addr)) {
1496                return -TARGET_EFAULT;
1497            }
1498        }
 1499        ts_ptr = &ts;
1500    } else {
1501        ts_ptr = NULL;
1502    }
1503
1504    /* Extract the two packed args for the sigset */
1505    sig_ptr = NULL;
1506    if (arg6) {
1507        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1508        if (!arg7) {
1509            return -TARGET_EFAULT;
1510        }
1511        arg_sigset = tswapal(arg7[0]);
1512        arg_sigsize = tswapal(arg7[1]);
1513        unlock_user(arg7, arg6, 0);
1514
1515        if (arg_sigset) {
1516            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1517            if (ret != 0) {
1518                return ret;
1519            }
1520            sig_ptr = &sig;
1521            sig.size = SIGSET_T_SIZE;
1522        }
1523    }
1524
1525    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1526                                  ts_ptr, sig_ptr));
1527
1528    if (sig_ptr) {
1529        finish_sigsuspend_mask(ret);
1530    }
1531
1532    if (!is_error(ret)) {
1533        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1534            return -TARGET_EFAULT;
1535        }
1536        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1537            return -TARGET_EFAULT;
1538        }
1539        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1540            return -TARGET_EFAULT;
1541        }
1542        if (time64) {
1543            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1544                return -TARGET_EFAULT;
1545            }
1546        } else {
1547            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1548                return -TARGET_EFAULT;
1549            }
1550        }
1551    }
1552    return ret;
1553}
1554#endif
1555
1556#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1557    defined(TARGET_NR_ppoll_time64)
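    /*
     * Common handling for poll, ppoll and ppoll_time64.  The guest
     * pollfd array is copied into a host array; a plain poll() timeout
     * in milliseconds is converted to a timespec so that both variants
     * can be serviced by safe_ppoll().
     */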
1558static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1559                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1560{
1561    struct target_pollfd *target_pfd;
1562    unsigned int nfds = arg2;
1563    struct pollfd *pfd;
1564    unsigned int i;
1565    abi_long ret;
1566
1567    pfd = NULL;
1568    target_pfd = NULL;
1569    if (nfds) {
1570        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1571            return -TARGET_EINVAL;
1572        }
1573        target_pfd = lock_user(VERIFY_WRITE, arg1,
1574                               sizeof(struct target_pollfd) * nfds, 1);
1575        if (!target_pfd) {
1576            return -TARGET_EFAULT;
1577        }
1578
1579        pfd = alloca(sizeof(struct pollfd) * nfds);
1580        for (i = 0; i < nfds; i++) {
1581            pfd[i].fd = tswap32(target_pfd[i].fd);
1582            pfd[i].events = tswap16(target_pfd[i].events);
1583        }
1584    }
1585    if (ppoll) {
1586        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1587        sigset_t *set = NULL;
1588
1589        if (arg3) {
1590            if (time64) {
1591                if (target_to_host_timespec64(timeout_ts, arg3)) {
1592                    unlock_user(target_pfd, arg1, 0);
1593                    return -TARGET_EFAULT;
1594                }
1595            } else {
1596                if (target_to_host_timespec(timeout_ts, arg3)) {
1597                    unlock_user(target_pfd, arg1, 0);
1598                    return -TARGET_EFAULT;
1599                }
1600            }
1601        } else {
1602            timeout_ts = NULL;
1603        }
1604
1605        if (arg4) {
1606            ret = process_sigsuspend_mask(&set, arg4, arg5);
1607            if (ret != 0) {
1608                unlock_user(target_pfd, arg1, 0);
1609                return ret;
1610            }
1611        }
1612
1613        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1614                                   set, SIGSET_T_SIZE));
1615
1616        if (set) {
1617            finish_sigsuspend_mask(ret);
1618        }
1619        if (!is_error(ret) && arg3) {
1620            if (time64) {
1621                if (host_to_target_timespec64(arg3, timeout_ts)) {
1622                    return -TARGET_EFAULT;
1623                }
1624            } else {
1625                if (host_to_target_timespec(arg3, timeout_ts)) {
1626                    return -TARGET_EFAULT;
1627                }
1628            }
1629        }
1630    } else {
1631        struct timespec ts, *pts;
1632
1633        if (arg3 >= 0) {
1634            /* Convert ms to secs, ns */
1635            ts.tv_sec = arg3 / 1000;
1636            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1637            pts = &ts;
1638        } else {
1639            /* A negative poll() timeout means "infinite". */
1640            pts = NULL;
1641        }
1642        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1643    }
1644
1645    if (!is_error(ret)) {
1646        for (i = 0; i < nfds; i++) {
1647            target_pfd[i].revents = tswap16(pfd[i].revents);
1648        }
1649    }
1650    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1651    return ret;
1652}
1653#endif
1654
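    /*
     * Implement pipe() and pipe2() on top of the host pipe2(); the
     * resulting descriptors are written back to the guest, except for
     * the targets handled specially below.
     */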
1655static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1656                        int flags, int is_pipe2)
1657{
1658    int host_pipe[2];
1659    abi_long ret;
1660    ret = pipe2(host_pipe, flags);
1661
1662    if (is_error(ret))
1663        return get_errno(ret);
1664
1665    /* Several targets have special calling conventions for the original
1666       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1667    if (!is_pipe2) {
1668#if defined(TARGET_ALPHA)
1669        cpu_env->ir[IR_A4] = host_pipe[1];
1670        return host_pipe[0];
1671#elif defined(TARGET_MIPS)
1672        cpu_env->active_tc.gpr[3] = host_pipe[1];
1673        return host_pipe[0];
1674#elif defined(TARGET_SH4)
1675        cpu_env->gregs[1] = host_pipe[1];
1676        return host_pipe[0];
1677#elif defined(TARGET_SPARC)
1678        cpu_env->regwptr[1] = host_pipe[1];
1679        return host_pipe[0];
1680#endif
1681    }
1682
1683    if (put_user_s32(host_pipe[0], pipedes)
1684        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1685        return -TARGET_EFAULT;
1686    return get_errno(ret);
1687}
1688
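    /*
     * Convert a guest ip_mreq/ip_mreqn multicast request into the host
     * ip_mreqn; the interface index is only byte-swapped when the full
     * ip_mreqn form was supplied.
     */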
1689static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1690                                              abi_ulong target_addr,
1691                                              socklen_t len)
1692{
1693    struct target_ip_mreqn *target_smreqn;
1694
1695    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1696    if (!target_smreqn)
1697        return -TARGET_EFAULT;
1698    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1699    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1700    if (len == sizeof(struct target_ip_mreqn))
1701        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1702    unlock_user(target_smreqn, target_addr, 0);
1703
1704    return 0;
1705}
1706
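    /*
     * Copy a sockaddr from guest memory to the host, byte-swapping the
     * address family and any family-specific fields that need it.
     */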
1707static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1708                                               abi_ulong target_addr,
1709                                               socklen_t len)
1710{
1711    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1712    sa_family_t sa_family;
1713    struct target_sockaddr *target_saddr;
1714
1715    if (fd_trans_target_to_host_addr(fd)) {
1716        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1717    }
1718
1719    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1720    if (!target_saddr)
1721        return -TARGET_EFAULT;
1722
1723    sa_family = tswap16(target_saddr->sa_family);
1724
1725    /* Oops. The caller might send an incomplete sun_path; sun_path
1726     * must be terminated by \0 (see the manual page), but
1727     * unfortunately it is quite common to specify sockaddr_un
1728     * length as "strlen(x->sun_path)" when it should be
1729     * "strlen(...) + 1". We'll fix that here if needed.
1730     * The Linux kernel applies a similar fixup.
1731     */
1732
1733    if (sa_family == AF_UNIX) {
1734        if (len < unix_maxlen && len > 0) {
1735            char *cp = (char *)target_saddr;
1736
1737            if (cp[len - 1] && !cp[len])
1738                len++;
1739        }
1740        if (len > unix_maxlen)
1741            len = unix_maxlen;
1742    }
1743
1744    memcpy(addr, target_saddr, len);
1745    addr->sa_family = sa_family;
1746    if (sa_family == AF_NETLINK) {
1747        struct sockaddr_nl *nladdr;
1748
1749        nladdr = (struct sockaddr_nl *)addr;
1750        nladdr->nl_pid = tswap32(nladdr->nl_pid);
1751        nladdr->nl_groups = tswap32(nladdr->nl_groups);
1752    } else if (sa_family == AF_PACKET) {
1753        struct target_sockaddr_ll *lladdr;
1754
1755        lladdr = (struct target_sockaddr_ll *)addr;
1756        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1757        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1758    }
1759    unlock_user(target_saddr, target_addr, 0);
1760
1761    return 0;
1762}
1763
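    /*
     * Copy a host sockaddr out to guest memory, byte-swapping the
     * address family and the family-specific fields (netlink pid and
     * groups, packet ifindex and hatype, IPv6 scope id).
     */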
1764static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1765                                               struct sockaddr *addr,
1766                                               socklen_t len)
1767{
1768    struct target_sockaddr *target_saddr;
1769
1770    if (len == 0) {
1771        return 0;
1772    }
1773    assert(addr);
1774
1775    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1776    if (!target_saddr)
1777        return -TARGET_EFAULT;
1778    memcpy(target_saddr, addr, len);
1779    if (len >= offsetof(struct target_sockaddr, sa_family) +
1780        sizeof(target_saddr->sa_family)) {
1781        target_saddr->sa_family = tswap16(addr->sa_family);
1782    }
1783    if (addr->sa_family == AF_NETLINK &&
1784        len >= sizeof(struct target_sockaddr_nl)) {
1785        struct target_sockaddr_nl *target_nl =
1786               (struct target_sockaddr_nl *)target_saddr;
1787        target_nl->nl_pid = tswap32(target_nl->nl_pid);
1788        target_nl->nl_groups = tswap32(target_nl->nl_groups);
1789    } else if (addr->sa_family == AF_PACKET) {
1790        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1791        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1792        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1793    } else if (addr->sa_family == AF_INET6 &&
1794               len >= sizeof(struct target_sockaddr_in6)) {
1795        struct target_sockaddr_in6 *target_in6 =
1796               (struct target_sockaddr_in6 *)target_saddr;
1797        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1798    }
1799    unlock_user(target_saddr, target_addr, len);
1800
1801    return 0;
1802}
1803
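    /*
     * Convert the control (ancillary) data of a guest msghdr into host
     * format.  Only SCM_RIGHTS and SCM_CREDENTIALS payloads are
     * converted; anything else is copied through verbatim and logged
     * as unsupported.
     */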
1804static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1805                                           struct target_msghdr *target_msgh)
1806{
1807    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1808    abi_long msg_controllen;
1809    abi_ulong target_cmsg_addr;
1810    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1811    socklen_t space = 0;
1812
1813    msg_controllen = tswapal(target_msgh->msg_controllen);
1814    if (msg_controllen < sizeof(struct target_cmsghdr))
1815        goto the_end;
1816    target_cmsg_addr = tswapal(target_msgh->msg_control);
1817    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1818    target_cmsg_start = target_cmsg;
1819    if (!target_cmsg)
1820        return -TARGET_EFAULT;
1821
1822    while (cmsg && target_cmsg) {
1823        void *data = CMSG_DATA(cmsg);
1824        void *target_data = TARGET_CMSG_DATA(target_cmsg);
1825
1826        int len = tswapal(target_cmsg->cmsg_len)
1827            - sizeof(struct target_cmsghdr);
1828
1829        space += CMSG_SPACE(len);
1830        if (space > msgh->msg_controllen) {
1831            space -= CMSG_SPACE(len);
1832            /* This is a QEMU bug, since we allocated the payload
1833             * area ourselves (unlike overflow in host-to-target
1834             * conversion, which is just the guest giving us a buffer
1835             * that's too small). It can't happen for the payload types
1836             * we currently support; if it becomes an issue in future
1837             * we would need to improve our allocation strategy to
1838             * something more intelligent than "twice the size of the
1839             * target buffer we're reading from".
1840             */
1841            qemu_log_mask(LOG_UNIMP,
1842                          ("Unsupported ancillary data %d/%d: "
1843                           "unhandled msg size\n"),
1844                          tswap32(target_cmsg->cmsg_level),
1845                          tswap32(target_cmsg->cmsg_type));
1846            break;
1847        }
1848
1849        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1850            cmsg->cmsg_level = SOL_SOCKET;
1851        } else {
1852            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1853        }
1854        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1855        cmsg->cmsg_len = CMSG_LEN(len);
1856
1857        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1858            int *fd = (int *)data;
1859            int *target_fd = (int *)target_data;
1860            int i, numfds = len / sizeof(int);
1861
1862            for (i = 0; i < numfds; i++) {
1863                __get_user(fd[i], target_fd + i);
1864            }
1865        } else if (cmsg->cmsg_level == SOL_SOCKET
1866               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1867            struct ucred *cred = (struct ucred *)data;
1868            struct target_ucred *target_cred =
1869                (struct target_ucred *)target_data;
1870
1871            __get_user(cred->pid, &target_cred->pid);
1872            __get_user(cred->uid, &target_cred->uid);
1873            __get_user(cred->gid, &target_cred->gid);
1874        } else {
1875            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1876                          cmsg->cmsg_level, cmsg->cmsg_type);
1877            memcpy(data, target_data, len);
1878        }
1879
1880        cmsg = CMSG_NXTHDR(msgh, cmsg);
1881        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1882                                         target_cmsg_start);
1883    }
1884    unlock_user(target_cmsg, target_cmsg_addr, 0);
1885 the_end:
1886    msgh->msg_controllen = space;
1887    return 0;
1888}
1889
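    /*
     * Convert host ancillary data back into the guest msghdr,
     * truncating the payload (and setting MSG_CTRUNC) when the guest
     * control buffer is too small.
     */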
1890static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1891                                           struct msghdr *msgh)
1892{
1893    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1894    abi_long msg_controllen;
1895    abi_ulong target_cmsg_addr;
1896    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1897    socklen_t space = 0;
1898
1899    msg_controllen = tswapal(target_msgh->msg_controllen);
1900    if (msg_controllen < sizeof(struct target_cmsghdr))
1901        goto the_end;
1902    target_cmsg_addr = tswapal(target_msgh->msg_control);
1903    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1904    target_cmsg_start = target_cmsg;
1905    if (!target_cmsg)
1906        return -TARGET_EFAULT;
1907
1908    while (cmsg && target_cmsg) {
1909        void *data = CMSG_DATA(cmsg);
1910        void *target_data = TARGET_CMSG_DATA(target_cmsg);
1911
1912        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1913        int tgt_len, tgt_space;
1914
1915        /* We never copy a half-header but may copy half-data;
1916         * this is Linux's behaviour in put_cmsg(). Note that
1917         * truncation here is a guest problem (which we report
1918         * to the guest via the CTRUNC bit), unlike truncation
1919         * in target_to_host_cmsg, which is a QEMU bug.
1920         */
1921        if (msg_controllen < sizeof(struct target_cmsghdr)) {
1922            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1923            break;
1924        }
1925
1926        if (cmsg->cmsg_level == SOL_SOCKET) {
1927            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1928        } else {
1929            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1930        }
1931        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1932
1933        /* Payload types which need a different size of payload on
1934         * the target must adjust tgt_len here.
1935         */
1936        tgt_len = len;
1937        switch (cmsg->cmsg_level) {
1938        case SOL_SOCKET:
1939            switch (cmsg->cmsg_type) {
1940            case SO_TIMESTAMP:
1941                tgt_len = sizeof(struct target_timeval);
1942                break;
1943            default:
1944                break;
1945            }
1946            break;
1947        default:
1948            break;
1949        }
1950
1951        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1952            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1953            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1954        }
1955
1956        /* We must now copy-and-convert len bytes of payload
1957         * into tgt_len bytes of destination space. Bear in mind
1958         * that in both source and destination we may be dealing
1959         * with a truncated value!
1960         */
1961        switch (cmsg->cmsg_level) {
1962        case SOL_SOCKET:
1963            switch (cmsg->cmsg_type) {
1964            case SCM_RIGHTS:
1965            {
1966                int *fd = (int *)data;
1967                int *target_fd = (int *)target_data;
1968                int i, numfds = tgt_len / sizeof(int);
1969
1970                for (i = 0; i < numfds; i++) {
1971                    __put_user(fd[i], target_fd + i);
1972                }
1973                break;
1974            }
1975            case SO_TIMESTAMP:
1976            {
1977                struct timeval *tv = (struct timeval *)data;
1978                struct target_timeval *target_tv =
1979                    (struct target_timeval *)target_data;
1980
1981                if (len != sizeof(struct timeval) ||
1982                    tgt_len != sizeof(struct target_timeval)) {
1983                    goto unimplemented;
1984                }
1985
1986                /* copy struct timeval to target */
1987                __put_user(tv->tv_sec, &target_tv->tv_sec);
1988                __put_user(tv->tv_usec, &target_tv->tv_usec);
1989                break;
1990            }
1991            case SCM_CREDENTIALS:
1992            {
1993                struct ucred *cred = (struct ucred *)data;
1994                struct target_ucred *target_cred =
1995                    (struct target_ucred *)target_data;
1996
1997                __put_user(cred->pid, &target_cred->pid);
1998                __put_user(cred->uid, &target_cred->uid);
1999                __put_user(cred->gid, &target_cred->gid);
2000                break;
2001            }
2002            default:
2003                goto unimplemented;
2004            }
2005            break;
2006
2007        case SOL_IP:
2008            switch (cmsg->cmsg_type) {
2009            case IP_TTL:
2010            {
2011                uint32_t *v = (uint32_t *)data;
2012                uint32_t *t_int = (uint32_t *)target_data;
2013
2014                if (len != sizeof(uint32_t) ||
2015                    tgt_len != sizeof(uint32_t)) {
2016                    goto unimplemented;
2017                }
2018                __put_user(*v, t_int);
2019                break;
2020            }
2021            case IP_RECVERR:
2022            {
2023                struct errhdr_t {
2024                   struct sock_extended_err ee;
2025                   struct sockaddr_in offender;
2026                };
2027                struct errhdr_t *errh = (struct errhdr_t *)data;
2028                struct errhdr_t *target_errh =
2029                    (struct errhdr_t *)target_data;
2030
2031                if (len != sizeof(struct errhdr_t) ||
2032                    tgt_len != sizeof(struct errhdr_t)) {
2033                    goto unimplemented;
2034                }
2035                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2036                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2037                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2038                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2039                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2040                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2041                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2042                host_to_target_sockaddr((unsigned long) &target_errh->offender,
2043                    (void *) &errh->offender, sizeof(errh->offender));
2044                break;
2045            }
2046            default:
2047                goto unimplemented;
2048            }
2049            break;
2050
2051        case SOL_IPV6:
2052            switch (cmsg->cmsg_type) {
2053            case IPV6_HOPLIMIT:
2054            {
2055                uint32_t *v = (uint32_t *)data;
2056                uint32_t *t_int = (uint32_t *)target_data;
2057
2058                if (len != sizeof(uint32_t) ||
2059                    tgt_len != sizeof(uint32_t)) {
2060                    goto unimplemented;
2061                }
2062                __put_user(*v, t_int);
2063                break;
2064            }
2065            case IPV6_RECVERR:
2066            {
2067                struct errhdr6_t {
2068                   struct sock_extended_err ee;
2069                   struct sockaddr_in6 offender;
2070                };
2071                struct errhdr6_t *errh = (struct errhdr6_t *)data;
2072                struct errhdr6_t *target_errh =
2073                    (struct errhdr6_t *)target_data;
2074
2075                if (len != sizeof(struct errhdr6_t) ||
2076                    tgt_len != sizeof(struct errhdr6_t)) {
2077                    goto unimplemented;
2078                }
2079                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2080                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2081                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2082                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2083                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2084                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2085                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2086                host_to_target_sockaddr((unsigned long) &target_errh->offender,
2087                    (void *) &errh->offender, sizeof(errh->offender));
2088                break;
2089            }
2090            default:
2091                goto unimplemented;
2092            }
2093            break;
2094
2095        default:
2096        unimplemented:
2097            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2098                          cmsg->cmsg_level, cmsg->cmsg_type);
2099            memcpy(target_data, data, MIN(len, tgt_len));
2100            if (tgt_len > len) {
2101                memset(target_data + len, 0, tgt_len - len);
2102            }
2103        }
2104
2105        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2106        tgt_space = TARGET_CMSG_SPACE(tgt_len);
2107        if (msg_controllen < tgt_space) {
2108            tgt_space = msg_controllen;
2109        }
2110        msg_controllen -= tgt_space;
2111        space += tgt_space;
2112        cmsg = CMSG_NXTHDR(msgh, cmsg);
2113        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2114                                         target_cmsg_start);
2115    }
2116    unlock_user(target_cmsg, target_cmsg_addr, space);
2117 the_end:
2118    target_msgh->msg_controllen = tswapal(space);
2119    return 0;
2120}
2121
2122/* do_setsockopt() must return target values and target errnos. */
2123static abi_long do_setsockopt(int sockfd, int level, int optname,
2124                              abi_ulong optval_addr, socklen_t optlen)
2125{
2126    abi_long ret;
2127    int val;
2128    struct ip_mreqn *ip_mreq;
2129    struct ip_mreq_source *ip_mreq_source;
2130
2131    switch(level) {
2132    case SOL_TCP:
2133    case SOL_UDP:
2134        /* TCP and UDP options all take an 'int' value.  */
2135        if (optlen < sizeof(uint32_t))
2136            return -TARGET_EINVAL;
2137
2138        if (get_user_u32(val, optval_addr))
2139            return -TARGET_EFAULT;
2140        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2141        break;
2142    case SOL_IP:
2143        switch(optname) {
2144        case IP_TOS:
2145        case IP_TTL:
2146        case IP_HDRINCL:
2147        case IP_ROUTER_ALERT:
2148        case IP_RECVOPTS:
2149        case IP_RETOPTS:
2150        case IP_PKTINFO:
2151        case IP_MTU_DISCOVER:
2152        case IP_RECVERR:
2153        case IP_RECVTTL:
2154        case IP_RECVTOS:
2155#ifdef IP_FREEBIND
2156        case IP_FREEBIND:
2157#endif
2158        case IP_MULTICAST_TTL:
2159        case IP_MULTICAST_LOOP:
2160            val = 0;
2161            if (optlen >= sizeof(uint32_t)) {
2162                if (get_user_u32(val, optval_addr))
2163                    return -TARGET_EFAULT;
2164            } else if (optlen >= 1) {
2165                if (get_user_u8(val, optval_addr))
2166                    return -TARGET_EFAULT;
2167            }
2168            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2169            break;
2170        case IP_ADD_MEMBERSHIP:
2171        case IP_DROP_MEMBERSHIP:
2172            if (optlen < sizeof (struct target_ip_mreq) ||
2173                optlen > sizeof (struct target_ip_mreqn))
2174                return -TARGET_EINVAL;
2175
2176            ip_mreq = (struct ip_mreqn *) alloca(optlen);
2177            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2178            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2179            break;
2180
2181        case IP_BLOCK_SOURCE:
2182        case IP_UNBLOCK_SOURCE:
2183        case IP_ADD_SOURCE_MEMBERSHIP:
2184        case IP_DROP_SOURCE_MEMBERSHIP:
2185            if (optlen != sizeof (struct target_ip_mreq_source))
2186                return -TARGET_EINVAL;
2187
2188            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2189            if (!ip_mreq_source) {
2190                return -TARGET_EFAULT;
2191            }
2192            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2193            unlock_user (ip_mreq_source, optval_addr, 0);
2194            break;
2195
2196        default:
2197            goto unimplemented;
2198        }
2199        break;
2200    case SOL_IPV6:
2201        switch (optname) {
2202        case IPV6_MTU_DISCOVER:
2203        case IPV6_MTU:
2204        case IPV6_V6ONLY:
2205        case IPV6_RECVPKTINFO:
2206        case IPV6_UNICAST_HOPS:
2207        case IPV6_MULTICAST_HOPS:
2208        case IPV6_MULTICAST_LOOP:
2209        case IPV6_RECVERR:
2210        case IPV6_RECVHOPLIMIT:
2211        case IPV6_2292HOPLIMIT:
2212        case IPV6_CHECKSUM:
2213        case IPV6_ADDRFORM:
2214        case IPV6_2292PKTINFO:
2215        case IPV6_RECVTCLASS:
2216        case IPV6_RECVRTHDR:
2217        case IPV6_2292RTHDR:
2218        case IPV6_RECVHOPOPTS:
2219        case IPV6_2292HOPOPTS:
2220        case IPV6_RECVDSTOPTS:
2221        case IPV6_2292DSTOPTS:
2222        case IPV6_TCLASS:
2223        case IPV6_ADDR_PREFERENCES:
2224#ifdef IPV6_RECVPATHMTU
2225        case IPV6_RECVPATHMTU:
2226#endif
2227#ifdef IPV6_TRANSPARENT
2228        case IPV6_TRANSPARENT:
2229#endif
2230#ifdef IPV6_FREEBIND
2231        case IPV6_FREEBIND:
2232#endif
2233#ifdef IPV6_RECVORIGDSTADDR
2234        case IPV6_RECVORIGDSTADDR:
2235#endif
2236            val = 0;
2237            if (optlen < sizeof(uint32_t)) {
2238                return -TARGET_EINVAL;
2239            }
2240            if (get_user_u32(val, optval_addr)) {
2241                return -TARGET_EFAULT;
2242            }
2243            ret = get_errno(setsockopt(sockfd, level, optname,
2244                                       &val, sizeof(val)));
2245            break;
2246        case IPV6_PKTINFO:
2247        {
2248            struct in6_pktinfo pki;
2249
2250            if (optlen < sizeof(pki)) {
2251                return -TARGET_EINVAL;
2252            }
2253
2254            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2255                return -TARGET_EFAULT;
2256            }
2257
2258            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2259
2260            ret = get_errno(setsockopt(sockfd, level, optname,
2261                                       &pki, sizeof(pki)));
2262            break;
2263        }
2264        case IPV6_ADD_MEMBERSHIP:
2265        case IPV6_DROP_MEMBERSHIP:
2266        {
2267            struct ipv6_mreq ipv6mreq;
2268
2269            if (optlen < sizeof(ipv6mreq)) {
2270                return -TARGET_EINVAL;
2271            }
2272
2273            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2274                return -TARGET_EFAULT;
2275            }
2276
2277            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2278
2279            ret = get_errno(setsockopt(sockfd, level, optname,
2280                                       &ipv6mreq, sizeof(ipv6mreq)));
2281            break;
2282        }
2283        default:
2284            goto unimplemented;
2285        }
2286        break;
2287    case SOL_ICMPV6:
2288        switch (optname) {
2289        case ICMPV6_FILTER:
2290        {
2291            struct icmp6_filter icmp6f;
2292
2293            if (optlen > sizeof(icmp6f)) {
2294                optlen = sizeof(icmp6f);
2295            }
2296
2297            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2298                return -TARGET_EFAULT;
2299            }
2300
2301            for (val = 0; val < 8; val++) {
2302                icmp6f.data[val] = tswap32(icmp6f.data[val]);
2303            }
2304
2305            ret = get_errno(setsockopt(sockfd, level, optname,
2306                                       &icmp6f, optlen));
2307            break;
2308        }
2309        default:
2310            goto unimplemented;
2311        }
2312        break;
2313    case SOL_RAW:
2314        switch (optname) {
2315        case ICMP_FILTER:
2316        case IPV6_CHECKSUM:
2317            /* These take a u32 value. */
2318            if (optlen < sizeof(uint32_t)) {
2319                return -TARGET_EINVAL;
2320            }
2321
2322            if (get_user_u32(val, optval_addr)) {
2323                return -TARGET_EFAULT;
2324            }
2325            ret = get_errno(setsockopt(sockfd, level, optname,
2326                                       &val, sizeof(val)));
2327            break;
2328
2329        default:
2330            goto unimplemented;
2331        }
2332        break;
2333#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2334    case SOL_ALG:
2335        switch (optname) {
2336        case ALG_SET_KEY:
2337        {
2338            char *alg_key = g_malloc(optlen);
2339
2340            if (!alg_key) {
2341                return -TARGET_ENOMEM;
2342            }
2343            if (copy_from_user(alg_key, optval_addr, optlen)) {
2344                g_free(alg_key);
2345                return -TARGET_EFAULT;
2346            }
2347            ret = get_errno(setsockopt(sockfd, level, optname,
2348                                       alg_key, optlen));
2349            g_free(alg_key);
2350            break;
2351        }
2352        case ALG_SET_AEAD_AUTHSIZE:
2353        {
2354            ret = get_errno(setsockopt(sockfd, level, optname,
2355                                       NULL, optlen));
2356            break;
2357        }
2358        default:
2359            goto unimplemented;
2360        }
2361        break;
2362#endif
2363    case TARGET_SOL_SOCKET:
2364        switch (optname) {
2365        case TARGET_SO_RCVTIMEO:
2366        {
2367                struct timeval tv;
2368
2369                optname = SO_RCVTIMEO;
2370
2371set_timeout:
2372                if (optlen != sizeof(struct target_timeval)) {
2373                    return -TARGET_EINVAL;
2374                }
2375
2376                if (copy_from_user_timeval(&tv, optval_addr)) {
2377                    return -TARGET_EFAULT;
2378                }
2379
2380                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2381                                &tv, sizeof(tv)));
2382                return ret;
2383        }
2384        case TARGET_SO_SNDTIMEO:
2385                optname = SO_SNDTIMEO;
2386                goto set_timeout;
2387        case TARGET_SO_ATTACH_FILTER:
2388        {
2389                struct target_sock_fprog *tfprog;
2390                struct target_sock_filter *tfilter;
2391                struct sock_fprog fprog;
2392                struct sock_filter *filter;
2393                int i;
2394
2395                if (optlen != sizeof(*tfprog)) {
2396                    return -TARGET_EINVAL;
2397                }
2398                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2399                    return -TARGET_EFAULT;
2400                }
2401                if (!lock_user_struct(VERIFY_READ, tfilter,
2402                                      tswapal(tfprog->filter), 0)) {
2403                    unlock_user_struct(tfprog, optval_addr, 1);
2404                    return -TARGET_EFAULT;
2405                }
2406
2407                fprog.len = tswap16(tfprog->len);
2408                filter = g_try_new(struct sock_filter, fprog.len);
2409                if (filter == NULL) {
2410                    unlock_user_struct(tfilter, tfprog->filter, 1);
2411                    unlock_user_struct(tfprog, optval_addr, 1);
2412                    return -TARGET_ENOMEM;
2413                }
2414                for (i = 0; i < fprog.len; i++) {
2415                    filter[i].code = tswap16(tfilter[i].code);
2416                    filter[i].jt = tfilter[i].jt;
2417                    filter[i].jf = tfilter[i].jf;
2418                    filter[i].k = tswap32(tfilter[i].k);
2419                }
2420                fprog.filter = filter;
2421
2422                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2423                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2424                g_free(filter);
2425
2426                unlock_user_struct(tfilter, tfprog->filter, 1);
2427                unlock_user_struct(tfprog, optval_addr, 1);
2428                return ret;
2429        }
2430        case TARGET_SO_BINDTODEVICE:
2431        {
2432                char *dev_ifname, *addr_ifname;
2433
2434                if (optlen > IFNAMSIZ - 1) {
2435                    optlen = IFNAMSIZ - 1;
2436                }
2437                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2438                if (!dev_ifname) {
2439                    return -TARGET_EFAULT;
2440                }
2441                optname = SO_BINDTODEVICE;
2442                addr_ifname = alloca(IFNAMSIZ);
2443                memcpy(addr_ifname, dev_ifname, optlen);
2444                addr_ifname[optlen] = 0;
2445                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2446                                           addr_ifname, optlen));
2447                unlock_user (dev_ifname, optval_addr, 0);
2448                return ret;
2449        }
2450        case TARGET_SO_LINGER:
2451        {
2452                struct linger lg;
2453                struct target_linger *tlg;
2454
2455                if (optlen != sizeof(struct target_linger)) {
2456                    return -TARGET_EINVAL;
2457                }
2458                if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2459                    return -TARGET_EFAULT;
2460                }
2461                __get_user(lg.l_onoff, &tlg->l_onoff);
2462                __get_user(lg.l_linger, &tlg->l_linger);
2463                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2464                                &lg, sizeof(lg)));
2465                unlock_user_struct(tlg, optval_addr, 0);
2466                return ret;
2467        }
2468            /* Options with 'int' argument.  */
2469        case TARGET_SO_DEBUG:
2470                optname = SO_DEBUG;
2471                break;
2472        case TARGET_SO_REUSEADDR:
2473                optname = SO_REUSEADDR;
2474                break;
2475#ifdef SO_REUSEPORT
2476        case TARGET_SO_REUSEPORT:
2477                optname = SO_REUSEPORT;
2478                break;
2479#endif
2480        case TARGET_SO_TYPE:
2481                optname = SO_TYPE;
2482                break;
2483        case TARGET_SO_ERROR:
2484                optname = SO_ERROR;
2485                break;
2486        case TARGET_SO_DONTROUTE:
2487                optname = SO_DONTROUTE;
2488                break;
2489        case TARGET_SO_BROADCAST:
2490                optname = SO_BROADCAST;
2491                break;
2492        case TARGET_SO_SNDBUF:
2493                optname = SO_SNDBUF;
2494                break;
2495        case TARGET_SO_SNDBUFFORCE:
2496                optname = SO_SNDBUFFORCE;
2497                break;
2498        case TARGET_SO_RCVBUF:
2499                optname = SO_RCVBUF;
2500                break;
2501        case TARGET_SO_RCVBUFFORCE:
2502                optname = SO_RCVBUFFORCE;
2503                break;
2504        case TARGET_SO_KEEPALIVE:
2505                optname = SO_KEEPALIVE;
2506                break;
2507        case TARGET_SO_OOBINLINE:
2508                optname = SO_OOBINLINE;
2509                break;
2510        case TARGET_SO_NO_CHECK:
2511                optname = SO_NO_CHECK;
2512                break;
2513        case TARGET_SO_PRIORITY:
2514                optname = SO_PRIORITY;
2515                break;
2516#ifdef SO_BSDCOMPAT
2517        case TARGET_SO_BSDCOMPAT:
2518                optname = SO_BSDCOMPAT;
2519                break;
2520#endif
2521        case TARGET_SO_PASSCRED:
2522                optname = SO_PASSCRED;
2523                break;
2524        case TARGET_SO_PASSSEC:
2525                optname = SO_PASSSEC;
2526                break;
2527        case TARGET_SO_TIMESTAMP:
2528                optname = SO_TIMESTAMP;
2529                break;
2530        case TARGET_SO_RCVLOWAT:
2531                optname = SO_RCVLOWAT;
2532                break;
2533        default:
2534            goto unimplemented;
2535        }
2536        if (optlen < sizeof(uint32_t))
2537            return -TARGET_EINVAL;
2538
2539        if (get_user_u32(val, optval_addr))
2540            return -TARGET_EFAULT;
2541        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2542        break;
2543#ifdef SOL_NETLINK
2544    case SOL_NETLINK:
2545        switch (optname) {
2546        case NETLINK_PKTINFO:
2547        case NETLINK_ADD_MEMBERSHIP:
2548        case NETLINK_DROP_MEMBERSHIP:
2549        case NETLINK_BROADCAST_ERROR:
2550        case NETLINK_NO_ENOBUFS:
2551#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2552        case NETLINK_LISTEN_ALL_NSID:
2553        case NETLINK_CAP_ACK:
2554#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2555#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2556        case NETLINK_EXT_ACK:
2557#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2558#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2559        case NETLINK_GET_STRICT_CHK:
2560#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2561            break;
2562        default:
2563            goto unimplemented;
2564        }
2565        val = 0;
2566        if (optlen < sizeof(uint32_t)) {
2567            return -TARGET_EINVAL;
2568        }
2569        if (get_user_u32(val, optval_addr)) {
2570            return -TARGET_EFAULT;
2571        }
2572        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2573                                   sizeof(val)));
2574        break;
2575#endif /* SOL_NETLINK */
2576    default:
2577    unimplemented:
2578        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2579                      level, optname);
2580        ret = -TARGET_ENOPROTOOPT;
2581    }
2582    return ret;
2583}
2584
2585/* do_getsockopt() must return target values and target errnos. */
2586static abi_long do_getsockopt(int sockfd, int level, int optname,
2587                              abi_ulong optval_addr, abi_ulong optlen)
2588{
2589    abi_long ret;
2590    int len, val;
2591    socklen_t lv;
2592
2593    switch(level) {
2594    case TARGET_SOL_SOCKET:
2595        level = SOL_SOCKET;
2596        switch (optname) {
2597        /* These don't just return a single integer */
2598        case TARGET_SO_PEERNAME:
2599            goto unimplemented;
2600        case TARGET_SO_RCVTIMEO: {
2601            struct timeval tv;
2602            socklen_t tvlen;
2603
2604            optname = SO_RCVTIMEO;
2605
2606get_timeout:
2607            if (get_user_u32(len, optlen)) {
2608                return -TARGET_EFAULT;
2609            }
2610            if (len < 0) {
2611                return -TARGET_EINVAL;
2612            }
2613
2614            tvlen = sizeof(tv);
2615            ret = get_errno(getsockopt(sockfd, level, optname,
2616                                       &tv, &tvlen));
2617            if (ret < 0) {
2618                return ret;
2619            }
2620            if (len > sizeof(struct target_timeval)) {
2621                len = sizeof(struct target_timeval);
2622            }
2623            if (copy_to_user_timeval(optval_addr, &tv)) {
2624                return -TARGET_EFAULT;
2625            }
2626            if (put_user_u32(len, optlen)) {
2627                return -TARGET_EFAULT;
2628            }
2629            break;
2630        }
2631        case TARGET_SO_SNDTIMEO:
2632            optname = SO_SNDTIMEO;
2633            goto get_timeout;
2634        case TARGET_SO_PEERCRED: {
2635            struct ucred cr;
2636            socklen_t crlen;
2637            struct target_ucred *tcr;
2638
2639            if (get_user_u32(len, optlen)) {
2640                return -TARGET_EFAULT;
2641            }
2642            if (len < 0) {
2643                return -TARGET_EINVAL;
2644            }
2645
2646            crlen = sizeof(cr);
2647            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2648                                       &cr, &crlen));
2649            if (ret < 0) {
2650                return ret;
2651            }
2652            if (len > crlen) {
2653                len = crlen;
2654            }
2655            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2656                return -TARGET_EFAULT;
2657            }
2658            __put_user(cr.pid, &tcr->pid);
2659            __put_user(cr.uid, &tcr->uid);
2660            __put_user(cr.gid, &tcr->gid);
2661            unlock_user_struct(tcr, optval_addr, 1);
2662            if (put_user_u32(len, optlen)) {
2663                return -TARGET_EFAULT;
2664            }
2665            break;
2666        }
2667        case TARGET_SO_PEERSEC: {
2668            char *name;
2669
2670            if (get_user_u32(len, optlen)) {
2671                return -TARGET_EFAULT;
2672            }
2673            if (len < 0) {
2674                return -TARGET_EINVAL;
2675            }
2676            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2677            if (!name) {
2678                return -TARGET_EFAULT;
2679            }
2680            lv = len;
2681            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2682                                       name, &lv));
2683            if (put_user_u32(lv, optlen)) {
2684                ret = -TARGET_EFAULT;
2685            }
2686            unlock_user(name, optval_addr, lv);
2687            break;
2688        }
2689        case TARGET_SO_LINGER:
2690        {
2691            struct linger lg;
2692            socklen_t lglen;
2693            struct target_linger *tlg;
2694
2695            if (get_user_u32(len, optlen)) {
2696                return -TARGET_EFAULT;
2697            }
2698            if (len < 0) {
2699                return -TARGET_EINVAL;
2700            }
2701
2702            lglen = sizeof(lg);
2703            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2704                                       &lg, &lglen));
2705            if (ret < 0) {
2706                return ret;
2707            }
2708            if (len > lglen) {
2709                len = lglen;
2710            }
2711            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2712                return -TARGET_EFAULT;
2713            }
2714            __put_user(lg.l_onoff, &tlg->l_onoff);
2715            __put_user(lg.l_linger, &tlg->l_linger);
2716            unlock_user_struct(tlg, optval_addr, 1);
2717            if (put_user_u32(len, optlen)) {
2718                return -TARGET_EFAULT;
2719            }
2720            break;
2721        }
2722        /* Options with 'int' argument.  */
2723        case TARGET_SO_DEBUG:
2724            optname = SO_DEBUG;
2725            goto int_case;
2726        case TARGET_SO_REUSEADDR:
2727            optname = SO_REUSEADDR;
2728            goto int_case;
2729#ifdef SO_REUSEPORT
2730        case TARGET_SO_REUSEPORT:
2731            optname = SO_REUSEPORT;
2732            goto int_case;
2733#endif
2734        case TARGET_SO_TYPE:
2735            optname = SO_TYPE;
2736            goto int_case;
2737        case TARGET_SO_ERROR:
2738            optname = SO_ERROR;
2739            goto int_case;
2740        case TARGET_SO_DONTROUTE:
2741            optname = SO_DONTROUTE;
2742            goto int_case;
2743        case TARGET_SO_BROADCAST:
2744            optname = SO_BROADCAST;
2745            goto int_case;
2746        case TARGET_SO_SNDBUF:
2747            optname = SO_SNDBUF;
2748            goto int_case;
2749        case TARGET_SO_RCVBUF:
2750            optname = SO_RCVBUF;
2751            goto int_case;
2752        case TARGET_SO_KEEPALIVE:
2753            optname = SO_KEEPALIVE;
2754            goto int_case;
2755        case TARGET_SO_OOBINLINE:
2756            optname = SO_OOBINLINE;
2757            goto int_case;
2758        case TARGET_SO_NO_CHECK:
2759            optname = SO_NO_CHECK;
2760            goto int_case;
2761        case TARGET_SO_PRIORITY:
2762            optname = SO_PRIORITY;
2763            goto int_case;
2764#ifdef SO_BSDCOMPAT
2765        case TARGET_SO_BSDCOMPAT:
2766            optname = SO_BSDCOMPAT;
2767            goto int_case;
2768#endif
2769        case TARGET_SO_PASSCRED:
2770            optname = SO_PASSCRED;
2771            goto int_case;
2772        case TARGET_SO_TIMESTAMP:
2773            optname = SO_TIMESTAMP;
2774            goto int_case;
2775        case TARGET_SO_RCVLOWAT:
2776            optname = SO_RCVLOWAT;
2777            goto int_case;
2778        case TARGET_SO_ACCEPTCONN:
2779            optname = SO_ACCEPTCONN;
2780            goto int_case;
2781        case TARGET_SO_PROTOCOL:
2782            optname = SO_PROTOCOL;
2783            goto int_case;
2784        case TARGET_SO_DOMAIN:
2785            optname = SO_DOMAIN;
2786            goto int_case;
2787        default:
2788            goto int_case;
2789        }
2790        break;
2791    case SOL_TCP:
2792    case SOL_UDP:
2793        /* TCP and UDP options all take an 'int' value.  */
2794    int_case:
2795        if (get_user_u32(len, optlen))
2796            return -TARGET_EFAULT;
2797        if (len < 0)
2798            return -TARGET_EINVAL;
2799        lv = sizeof(lv);
2800        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2801        if (ret < 0)
2802            return ret;
2803        if (optname == SO_TYPE) {
2804            val = host_to_target_sock_type(val);
2805        }
2806        if (len > lv)
2807            len = lv;
2808        if (len == 4) {
2809            if (put_user_u32(val, optval_addr))
2810                return -TARGET_EFAULT;
2811        } else {
2812            if (put_user_u8(val, optval_addr))
2813                return -TARGET_EFAULT;
2814        }
2815        if (put_user_u32(len, optlen))
2816            return -TARGET_EFAULT;
2817        break;
2818    case SOL_IP:
2819        switch(optname) {
2820        case IP_TOS:
2821        case IP_TTL:
2822        case IP_HDRINCL:
2823        case IP_ROUTER_ALERT:
2824        case IP_RECVOPTS:
2825        case IP_RETOPTS:
2826        case IP_PKTINFO:
2827        case IP_MTU_DISCOVER:
2828        case IP_RECVERR:
2829        case IP_RECVTOS:
2830#ifdef IP_FREEBIND
2831        case IP_FREEBIND:
2832#endif
2833        case IP_MULTICAST_TTL:
2834        case IP_MULTICAST_LOOP:
2835            if (get_user_u32(len, optlen))
2836                return -TARGET_EFAULT;
2837            if (len < 0)
2838                return -TARGET_EINVAL;
2839            lv = sizeof(lv);
2840            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2841            if (ret < 0)
2842                return ret;
2843            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2844                len = 1;
2845                if (put_user_u32(len, optlen)
2846                    || put_user_u8(val, optval_addr))
2847                    return -TARGET_EFAULT;
2848            } else {
2849                if (len > sizeof(int))
2850                    len = sizeof(int);
2851                if (put_user_u32(len, optlen)
2852                    || put_user_u32(val, optval_addr))
2853                    return -TARGET_EFAULT;
2854            }
2855            break;
2856        default:
2857            ret = -TARGET_ENOPROTOOPT;
2858            break;
2859        }
2860        break;
2861    case SOL_IPV6:
2862        switch (optname) {
2863        case IPV6_MTU_DISCOVER:
2864        case IPV6_MTU:
2865        case IPV6_V6ONLY:
2866        case IPV6_RECVPKTINFO:
2867        case IPV6_UNICAST_HOPS:
2868        case IPV6_MULTICAST_HOPS:
2869        case IPV6_MULTICAST_LOOP:
2870        case IPV6_RECVERR:
2871        case IPV6_RECVHOPLIMIT:
2872        case IPV6_2292HOPLIMIT:
2873        case IPV6_CHECKSUM:
2874        case IPV6_ADDRFORM:
2875        case IPV6_2292PKTINFO:
2876        case IPV6_RECVTCLASS:
2877        case IPV6_RECVRTHDR:
2878        case IPV6_2292RTHDR:
2879        case IPV6_RECVHOPOPTS:
2880        case IPV6_2292HOPOPTS:
2881        case IPV6_RECVDSTOPTS:
2882        case IPV6_2292DSTOPTS:
2883        case IPV6_TCLASS:
2884        case IPV6_ADDR_PREFERENCES:
2885#ifdef IPV6_RECVPATHMTU
2886        case IPV6_RECVPATHMTU:
2887#endif
2888#ifdef IPV6_TRANSPARENT
2889        case IPV6_TRANSPARENT:
2890#endif
2891#ifdef IPV6_FREEBIND
2892        case IPV6_FREEBIND:
2893#endif
2894#ifdef IPV6_RECVORIGDSTADDR
2895        case IPV6_RECVORIGDSTADDR:
2896#endif
2897            if (get_user_u32(len, optlen))
2898                return -TARGET_EFAULT;
2899            if (len < 0)
2900                return -TARGET_EINVAL;
2901            lv = sizeof(lv);
2902            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2903            if (ret < 0)
2904                return ret;
2905            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2906                len = 1;
2907                if (put_user_u32(len, optlen)
2908                    || put_user_u8(val, optval_addr))
2909                    return -TARGET_EFAULT;
2910            } else {
2911                if (len > sizeof(int))
2912                    len = sizeof(int);
2913                if (put_user_u32(len, optlen)
2914                    || put_user_u32(val, optval_addr))
2915                    return -TARGET_EFAULT;
2916            }
2917            break;
2918        default:
2919            ret = -TARGET_ENOPROTOOPT;
2920            break;
2921        }
2922        break;
2923#ifdef SOL_NETLINK
2924    case SOL_NETLINK:
2925        switch (optname) {
2926        case NETLINK_PKTINFO:
2927        case NETLINK_BROADCAST_ERROR:
2928        case NETLINK_NO_ENOBUFS:
2929#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2930        case NETLINK_LISTEN_ALL_NSID:
2931        case NETLINK_CAP_ACK:
2932#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2933#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2934        case NETLINK_EXT_ACK:
2935#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2936#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2937        case NETLINK_GET_STRICT_CHK:
2938#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2939            if (get_user_u32(len, optlen)) {
2940                return -TARGET_EFAULT;
2941            }
2942            if (len != sizeof(val)) {
2943                return -TARGET_EINVAL;
2944            }
2945            lv = len;
2946            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2947            if (ret < 0) {
2948                return ret;
2949            }
2950            if (put_user_u32(lv, optlen)
2951                || put_user_u32(val, optval_addr)) {
2952                return -TARGET_EFAULT;
2953            }
2954            break;
2955#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2956        case NETLINK_LIST_MEMBERSHIPS:
2957        {
2958            uint32_t *results;
2959            int i;
2960            if (get_user_u32(len, optlen)) {
2961                return -TARGET_EFAULT;
2962            }
2963            if (len < 0) {
2964                return -TARGET_EINVAL;
2965            }
2966            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2967            if (!results && len > 0) {
2968                return -TARGET_EFAULT;
2969            }
2970            lv = len;
2971            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2972            if (ret < 0) {
2973                unlock_user(results, optval_addr, 0);
2974                return ret;
2975            }
2976            /* Swap host endianness to target endianness. */
2977            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2978                results[i] = tswap32(results[i]);
2979            }
2980            if (put_user_u32(lv, optlen)) {
2981                return -TARGET_EFAULT;
2982            }
2983            unlock_user(results, optval_addr, 0);
2984            break;
2985        }
2986#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2987        default:
2988            goto unimplemented;
2989        }
2990        break;
2991#endif /* SOL_NETLINK */
2992    default:
2993    unimplemented:
2994        qemu_log_mask(LOG_UNIMP,
2995                      "getsockopt level=%d optname=%d not yet supported\n",
2996                      level, optname);
2997        ret = -TARGET_EOPNOTSUPP;
2998        break;
2999    }
3000    return ret;
3001}
3002
3003/* Convert target low/high pair representing file offset into the host
3004 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3005 * as the kernel doesn't handle them either.
3006 */
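    /*
     * The shifts below are split in two so that a 64-bit guest (or host)
     * never shifts by the full word width.  For a 32-bit guest on a
     * 64-bit host, e.g. thigh = 0x01234567 and tlow = 0x89abcdef combine
     * to off = 0x0123456789abcdef, giving hlow = 0x0123456789abcdef and
     * hhigh = 0.
     */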
3007static void target_to_host_low_high(abi_ulong tlow,
3008                                    abi_ulong thigh,
3009                                    unsigned long *hlow,
3010                                    unsigned long *hhigh)
3011{
3012    uint64_t off = tlow |
3013        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3014        TARGET_LONG_BITS / 2;
3015
3016    *hlow = off;
3017    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3018}
3019
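    /*
     * Lock a guest iovec array into host memory.  Returns NULL with
     * errno set on failure; a zero count also returns NULL, but with
     * errno cleared to 0.
     */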
3020static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3021                                abi_ulong count, int copy)
3022{
3023    struct target_iovec *target_vec;
3024    struct iovec *vec;
3025    abi_ulong total_len, max_len;
3026    int i;
3027    int err = 0;
3028    bool bad_address = false;
3029
3030    if (count == 0) {
3031        errno = 0;
3032        return NULL;
3033    }
3034    if (count > IOV_MAX) {
3035        errno = EINVAL;
3036        return NULL;
3037    }
3038
3039    vec = g_try_new0(struct iovec, count);
3040    if (vec == NULL) {
3041        errno = ENOMEM;
3042        return NULL;
3043    }
3044
3045    target_vec = lock_user(VERIFY_READ, target_addr,
3046                           count * sizeof(struct target_iovec), 1);
3047    if (target_vec == NULL) {
3048        err = EFAULT;
3049        goto fail2;
3050    }
3051
3052    /* ??? If host page size > target page size, this will result in a
3053       value larger than what we can actually support.  */
3054    max_len = 0x7fffffff & TARGET_PAGE_MASK;
3055    total_len = 0;
3056
3057    for (i = 0; i < count; i++) {
3058        abi_ulong base = tswapal(target_vec[i].iov_base);
3059        abi_long len = tswapal(target_vec[i].iov_len);
3060
3061        if (len < 0) {
3062            err = EINVAL;
3063            goto fail;
3064        } else if (len == 0) {
3065            /* Zero length pointer is ignored.  */
3066            vec[i].iov_base = 0;
3067        } else {
3068            vec[i].iov_base = lock_user(type, base, len, copy);
3069            /* If the first buffer pointer is bad, this is a fault.  But
3070             * subsequent bad buffers will result in a partial write; this
3071             * is realized by filling the vector with null pointers and
3072             * zero lengths. */
3073            if (!vec[i].iov_base) {
3074                if (i == 0) {
3075                    err = EFAULT;
3076                    goto fail;
3077                } else {
3078                    bad_address = true;
3079                }
3080            }
3081            if (bad_address) {
3082                len = 0;
3083            }
3084            if (len > max_len - total_len) {
3085                len = max_len - total_len;
3086            }
3087        }
3088        vec[i].iov_len = len;
3089        total_len += len;
3090    }
3091
3092    unlock_user(target_vec, target_addr, 0);
3093    return vec;
3094
3095 fail:
3096    while (--i >= 0) {
3097        if (tswapal(target_vec[i].iov_len) > 0) {
3098            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3099        }
3100    }
3101    unlock_user(target_vec, target_addr, 0);
3102 fail2:
3103    g_free(vec);
3104    errno = err;
3105    return NULL;
3106}
3107
3108static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3109                         abi_ulong count, int copy)
3110{
3111    struct target_iovec *target_vec;
3112    int i;
3113
3114    target_vec = lock_user(VERIFY_READ, target_addr,
3115                           count * sizeof(struct target_iovec), 1);
3116    if (target_vec) {
3117        for (i = 0; i < count; i++) {
3118            abi_ulong base = tswapal(target_vec[i].iov_base);
3119            abi_long len = tswapal(target_vec[i].iov_len);
3120            if (len < 0) {
3121                break;
3122            }
3123            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3124        }
3125        unlock_user(target_vec, target_addr, 0);
3126    }
3127
3128    g_free(vec);
3129}
3130
3131static inline int target_to_host_sock_type(int *type)
3132{
3133    int host_type = 0;
3134    int target_type = *type;
3135
3136    switch (target_type & TARGET_SOCK_TYPE_MASK) {
3137    case TARGET_SOCK_DGRAM:
3138        host_type = SOCK_DGRAM;
3139        break;
3140    case TARGET_SOCK_STREAM:
3141        host_type = SOCK_STREAM;
3142        break;
3143    default:
3144        host_type = target_type & TARGET_SOCK_TYPE_MASK;
3145        break;
3146    }
3147    if (target_type & TARGET_SOCK_CLOEXEC) {
3148#if defined(SOCK_CLOEXEC)
3149        host_type |= SOCK_CLOEXEC;
3150#else
3151        return -TARGET_EINVAL;
3152#endif
3153    }
3154    if (target_type & TARGET_SOCK_NONBLOCK) {
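            /*
             * If the host lacks SOCK_NONBLOCK but does have O_NONBLOCK,
             * fall through silently; sock_flags_fixup() emulates the flag
             * with fcntl() once the socket has been created.
             */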
3155#if defined(SOCK_NONBLOCK)
3156        host_type |= SOCK_NONBLOCK;
3157#elif !defined(O_NONBLOCK)
3158        return -TARGET_EINVAL;
3159#endif
3160    }
3161    *type = host_type;
3162    return 0;
3163}
3164
3165/* Try to emulate socket type flags after socket creation.  */
3166static int sock_flags_fixup(int fd, int target_type)
3167{
3168#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3169    if (target_type & TARGET_SOCK_NONBLOCK) {
3170        int flags = fcntl(fd, F_GETFL);
3171        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3172            close(fd);
3173            return -TARGET_EINVAL;
3174        }
3175    }
3176#endif
3177    return fd;
3178}
3179
3180/* do_socket() Must return target values and target errnos. */
3181static abi_long do_socket(int domain, int type, int protocol)
3182{
3183    int target_type = type;
3184    int ret;
3185
3186    ret = target_to_host_sock_type(&type);
3187    if (ret) {
3188        return ret;
3189    }
3190
3191    if (domain == PF_NETLINK && !(
3192#ifdef CONFIG_RTNETLINK
3193         protocol == NETLINK_ROUTE ||
3194#endif
3195         protocol == NETLINK_KOBJECT_UEVENT ||
3196         protocol == NETLINK_AUDIT)) {
3197        return -TARGET_EPROTONOSUPPORT;
3198    }
3199
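        /*
         * Packet socket protocol numbers are given in network byte order
         * (htons() on the guest), so byteswap the 16-bit value whenever
         * guest and host endianness differ to keep the wire protocol the
         * same for the host kernel.
         */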
3200    if (domain == AF_PACKET ||
3201        (domain == AF_INET && type == SOCK_PACKET)) {
3202        protocol = tswap16(protocol);
3203    }
3204
3205    ret = get_errno(socket(domain, type, protocol));
3206    if (ret >= 0) {
3207        ret = sock_flags_fixup(ret, target_type);
3208        if (type == SOCK_PACKET) {
3209            /* Handle an obsolete case: SOCK_PACKET sockets
3210             * are bound by interface name.
3211             */
3212            fd_trans_register(ret, &target_packet_trans);
3213        } else if (domain == PF_NETLINK) {
3214            switch (protocol) {
3215#ifdef CONFIG_RTNETLINK
3216            case NETLINK_ROUTE:
3217                fd_trans_register(ret, &target_netlink_route_trans);
3218                break;
3219#endif
3220            case NETLINK_KOBJECT_UEVENT:
3221                /* nothing to do: messages are strings */
3222                break;
3223            case NETLINK_AUDIT:
3224                fd_trans_register(ret, &target_netlink_audit_trans);
3225                break;
3226            default:
3227                g_assert_not_reached();
3228            }
3229        }
3230    }
3231    return ret;
3232}
3233
3234/* do_bind() Must return target values and target errnos. */
3235static abi_long do_bind(int sockfd, abi_ulong target_addr,
3236                        socklen_t addrlen)
3237{
3238    void *addr;
3239    abi_long ret;
3240
3241    if ((int)addrlen < 0) {
3242        return -TARGET_EINVAL;
3243    }
3244
3245    addr = alloca(addrlen+1);
3246
3247    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3248    if (ret)
3249        return ret;
3250
3251    return get_errno(bind(sockfd, addr, addrlen));
3252}
3253
3254/* do_connect() Must return target values and target errnos. */
3255static abi_long do_connect(int sockfd, abi_ulong target_addr,
3256                           socklen_t addrlen)
3257{
3258    void *addr;
3259    abi_long ret;
3260
3261    if ((int)addrlen < 0) {
3262        return -TARGET_EINVAL;
3263    }
3264
3265    addr = alloca(addrlen+1);
3266
3267    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3268    if (ret)
3269        return ret;
3270
3271    return get_errno(safe_connect(sockfd, addr, addrlen));
3272}
3273
3274/* do_sendrecvmsg_locked() Must return target values and target errnos. */
3275static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3276                                      int flags, int send)
3277{
3278    abi_long ret, len;
3279    struct msghdr msg;
3280    abi_ulong count;
3281    struct iovec *vec;
3282    abi_ulong target_vec;
3283
3284    if (msgp->msg_name) {
3285        msg.msg_namelen = tswap32(msgp->msg_namelen);
3286        msg.msg_name = alloca(msg.msg_namelen+1);
3287        ret = target_to_host_sockaddr(fd, msg.msg_name,
3288                                      tswapal(msgp->msg_name),
3289                                      msg.msg_namelen);
3290        if (ret == -TARGET_EFAULT) {
3291            /* For connected sockets msg_name and msg_namelen must
3292             * be ignored, so returning EFAULT immediately is wrong.
3293             * Instead, pass a bad msg_name to the host kernel, and
3294             * let it decide whether to return EFAULT or not.
3295             */
3296            msg.msg_name = (void *)-1;
3297        } else if (ret) {
3298            goto out2;
3299        }
3300    } else {
3301        msg.msg_name = NULL;
3302        msg.msg_namelen = 0;
3303    }
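        /*
         * Allocate twice the target control buffer size: host cmsg headers
         * and payloads can be larger than the target's (e.g. a 64-bit host
         * serving a 32-bit target), so the translated data may need the
         * extra room.
         */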
3304    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3305    msg.msg_control = alloca(msg.msg_controllen);
3306    memset(msg.msg_control, 0, msg.msg_controllen);
3307
3308    msg.msg_flags = tswap32(msgp->msg_flags);
3309
3310    count = tswapal(msgp->msg_iovlen);
3311    target_vec = tswapal(msgp->msg_iov);
3312
3313    if (count > IOV_MAX) {
3314        /* sendmsg/recvmsg return a different errno for this condition than
3315         * readv/writev, so we must catch it here before lock_iovec() does.
3316         */
3317        ret = -TARGET_EMSGSIZE;
3318        goto out2;
3319    }
3320
3321    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3322                     target_vec, count, send);
3323    if (vec == NULL) {
3324        ret = -host_to_target_errno(errno);
3325        goto out2;
3326    }
3327    msg.msg_iovlen = count;
3328    msg.msg_iov = vec;
3329
3330    if (send) {
3331        if (fd_trans_target_to_host_data(fd)) {
3332            void *host_msg;
3333
3334            host_msg = g_malloc(msg.msg_iov->iov_len);
3335            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3336            ret = fd_trans_target_to_host_data(fd)(host_msg,
3337                                                   msg.msg_iov->iov_len);
3338            if (ret >= 0) {
3339                msg.msg_iov->iov_base = host_msg;
3340                ret = get_errno(safe_sendmsg(fd, &msg, flags));
3341            }
3342            g_free(host_msg);
3343        } else {
3344            ret = target_to_host_cmsg(&msg, msgp);
3345            if (ret == 0) {
3346                ret = get_errno(safe_sendmsg(fd, &msg, flags));
3347            }
3348        }
3349    } else {
3350        ret = get_errno(safe_recvmsg(fd, &msg, flags));
3351        if (!is_error(ret)) {
3352            len = ret;
3353            if (fd_trans_host_to_target_data(fd)) {
3354                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3355                                               MIN(msg.msg_iov->iov_len, len));
3356            }
3357            if (!is_error(ret)) {
3358                ret = host_to_target_cmsg(msgp, &msg);
3359            }
3360            if (!is_error(ret)) {
3361                msgp->msg_namelen = tswap32(msg.msg_namelen);
3362                msgp->msg_flags = tswap32(msg.msg_flags);
3363                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3364                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3365                                    msg.msg_name, msg.msg_namelen);
3366                    if (ret) {
3367                        goto out;
3368                    }
3369                }
3370
3371                ret = len;
3372            }
3373        }
3374    }
3375
3376out:
3377    unlock_iovec(vec, target_vec, count, !send);
3378out2:
3379    return ret;
3380}
3381
3382static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3383                               int flags, int send)
3384{
3385    abi_long ret;
3386    struct target_msghdr *msgp;
3387
3388    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3389                          msgp,
3390                          target_msg,
3391                          send ? 1 : 0)) {
3392        return -TARGET_EFAULT;
3393    }
3394    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3395    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3396    return ret;
3397}
3398
3399/* We don't rely on the C library to have sendmmsg/recvmmsg support,
3400 * so it might not have this *mmsg-specific flag either.
3401 */
3402#ifndef MSG_WAITFORONE
3403#define MSG_WAITFORONE 0x10000
3404#endif
3405
3406static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3407                                unsigned int vlen, unsigned int flags,
3408                                int send)
3409{
3410    struct target_mmsghdr *mmsgp;
3411    abi_long ret = 0;
3412    int i;
3413
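        /* The kernel silently clamps vlen to UIO_MAXIOV, so mirror that. */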
3414    if (vlen > UIO_MAXIOV) {
3415        vlen = UIO_MAXIOV;
3416    }
3417
3418    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3419    if (!mmsgp) {
3420        return -TARGET_EFAULT;
3421    }
3422
3423    for (i = 0; i < vlen; i++) {
3424        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3425        if (is_error(ret)) {
3426            break;
3427        }
3428        mmsgp[i].msg_len = tswap32(ret);
3429        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3430        if (flags & MSG_WAITFORONE) {
3431            flags |= MSG_DONTWAIT;
3432        }
3433    }
3434
3435    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3436
3437    /* Return the number of datagrams sent or received if we handled any
3438     * at all; otherwise return the error.
3439     */
3440    if (i) {
3441        return i;
3442    }
3443    return ret;
3444}
3445
3446/* do_accept4() Must return target values and target errnos. */
3447static abi_long do_accept4(int fd, abi_ulong target_addr,
3448                           abi_ulong target_addrlen_addr, int flags)
3449{
3450    socklen_t addrlen, ret_addrlen;
3451    void *addr;
3452    abi_long ret;
3453    int host_flags;
3454
3455    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3456
3457    if (target_addr == 0) {
3458        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3459    }
3460
3461    /* Linux returns EFAULT if the addrlen pointer is invalid */
3462    if (get_user_u32(addrlen, target_addrlen_addr))
3463        return -TARGET_EFAULT;
3464
3465    if ((int)addrlen < 0) {
3466        return -TARGET_EINVAL;
3467    }
3468
3469    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3470        return -TARGET_EFAULT;
3471    }
3472
3473    addr = alloca(addrlen);
3474
3475    ret_addrlen = addrlen;
3476    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3477    if (!is_error(ret)) {
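            /*
             * The kernel may report an address longer than the guest's
             * buffer; copy back at most addrlen bytes but still return the
             * full length, as accept(2) documents for truncated addresses.
             */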
3478        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3479        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3480            ret = -TARGET_EFAULT;
3481        }
3482    }
3483    return ret;
3484}
3485
3486/* do_getpeername() Must return target values and target errnos. */
3487static abi_long do_getpeername(int fd, abi_ulong target_addr,
3488                               abi_ulong target_addrlen_addr)
3489{
3490    socklen_t addrlen, ret_addrlen;
3491    void *addr;
3492    abi_long ret;
3493
3494    if (get_user_u32(addrlen, target_addrlen_addr))
3495        return -TARGET_EFAULT;
3496
3497    if ((int)addrlen < 0) {
3498        return -TARGET_EINVAL;
3499    }
3500
3501    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3502        return -TARGET_EFAULT;
3503    }
3504
3505    addr = alloca(addrlen);
3506
3507    ret_addrlen = addrlen;
3508    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3509    if (!is_error(ret)) {
3510        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3511        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3512            ret = -TARGET_EFAULT;
3513        }
3514    }
3515    return ret;
3516}
3517
3518/* do_getsockname() Must return target values and target errnos. */
3519static abi_long do_getsockname(int fd, abi_ulong target_addr,
3520                               abi_ulong target_addrlen_addr)
3521{
3522    socklen_t addrlen, ret_addrlen;
3523    void *addr;
3524    abi_long ret;
3525
3526    if (get_user_u32(addrlen, target_addrlen_addr))
3527        return -TARGET_EFAULT;
3528
3529    if ((int)addrlen < 0) {
3530        return -TARGET_EINVAL;
3531    }
3532
3533    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3534        return -TARGET_EFAULT;
3535    }
3536
3537    addr = alloca(addrlen);
3538
3539    ret_addrlen = addrlen;
3540    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3541    if (!is_error(ret)) {
3542        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3543        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3544            ret = -TARGET_EFAULT;
3545        }
3546    }
3547    return ret;
3548}
3549
3550/* do_socketpair() Must return target values and target errnos. */
3551static abi_long do_socketpair(int domain, int type, int protocol,
3552                              abi_ulong target_tab_addr)
3553{
3554    int tab[2];
3555    abi_long ret;
3556
3557    target_to_host_sock_type(&type);
3558
3559    ret = get_errno(socketpair(domain, type, protocol, tab));
3560    if (!is_error(ret)) {
3561        if (put_user_s32(tab[0], target_tab_addr)
3562            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3563            ret = -TARGET_EFAULT;
3564    }
3565    return ret;
3566}
3567
3568/* do_sendto() Must return target values and target errnos. */
3569static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3570                          abi_ulong target_addr, socklen_t addrlen)
3571{
3572    void *addr;
3573    void *host_msg;
3574    void *copy_msg = NULL;
3575    abi_long ret;
3576
3577    if ((int)addrlen < 0) {
3578        return -TARGET_EINVAL;
3579    }
3580
3581    host_msg = lock_user(VERIFY_READ, msg, len, 1);
3582    if (!host_msg)
3583        return -TARGET_EFAULT;
3584    if (fd_trans_target_to_host_data(fd)) {
3585        copy_msg = host_msg;
3586        host_msg = g_malloc(len);
3587        memcpy(host_msg, copy_msg, len);
3588        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3589        if (ret < 0) {
3590            goto fail;
3591        }
3592    }
3593    if (target_addr) {
3594        addr = alloca(addrlen+1);
3595        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3596        if (ret) {
3597            goto fail;
3598        }
3599        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3600    } else {
3601        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3602    }
3603fail:
3604    if (copy_msg) {
3605        g_free(host_msg);
3606        host_msg = copy_msg;
3607    }
3608    unlock_user(host_msg, msg, 0);
3609    return ret;
3610}
3611
3612/* do_recvfrom() Must return target values and target errnos. */
3613static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3614                            abi_ulong target_addr,
3615                            abi_ulong target_addrlen)
3616{
3617    socklen_t addrlen, ret_addrlen;
3618    void *addr;
3619    void *host_msg;
3620    abi_long ret;
3621
3622    if (!msg) {
3623        host_msg = NULL;
3624    } else {
3625        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3626        if (!host_msg) {
3627            return -TARGET_EFAULT;
3628        }
3629    }
3630    if (target_addr) {
3631        if (get_user_u32(addrlen, target_addrlen)) {
3632            ret = -TARGET_EFAULT;
3633            goto fail;
3634        }
3635        if ((int)addrlen < 0) {
3636            ret = -TARGET_EINVAL;
3637            goto fail;
3638        }
3639        addr = alloca(addrlen);
3640        ret_addrlen = addrlen;
3641        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3642                                      addr, &ret_addrlen));
3643    } else {
3644        addr = NULL; /* To keep compiler quiet.  */
3645        addrlen = 0; /* To keep compiler quiet.  */
3646        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3647    }
3648    if (!is_error(ret)) {
3649        if (fd_trans_host_to_target_data(fd)) {
3650            abi_long trans;
3651            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3652            if (is_error(trans)) {
3653                ret = trans;
3654                goto fail;
3655            }
3656        }
3657        if (target_addr) {
3658            host_to_target_sockaddr(target_addr, addr,
3659                                    MIN(addrlen, ret_addrlen));
3660            if (put_user_u32(ret_addrlen, target_addrlen)) {
3661                ret = -TARGET_EFAULT;
3662                goto fail;
3663            }
3664        }
3665        unlock_user(host_msg, msg, len);
3666    } else {
3667fail:
3668        unlock_user(host_msg, msg, 0);
3669    }
3670    return ret;
3671}
3672
3673#ifdef TARGET_NR_socketcall
3674/* do_socketcall() must return target values and target errnos. */
3675static abi_long do_socketcall(int num, abi_ulong vptr)
3676{
3677    static const unsigned nargs[] = { /* number of arguments per operation */
3678        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3679        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3680        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3681        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3682        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3683        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3684        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3685        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3686        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3687        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3688        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3689        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3690        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3691        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3692        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3693        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3694        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3695        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3696        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3697        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3698    };
3699    abi_long a[6]; /* max 6 args */
3700    unsigned i;
3701
3702    /* check the range of the first argument num */
3703    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3704    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3705        return -TARGET_EINVAL;
3706    }
3707    /* ensure we have space for args */
3708    if (nargs[num] > ARRAY_SIZE(a)) {
3709        return -TARGET_EINVAL;
3710    }
3711    /* collect the arguments in a[] according to nargs[] */
3712    for (i = 0; i < nargs[num]; ++i) {
3713        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3714            return -TARGET_EFAULT;
3715        }
3716    }
3717    /* now that we have the args, invoke the appropriate underlying function */
3718    switch (num) {
3719    case TARGET_SYS_SOCKET: /* domain, type, protocol */
3720        return do_socket(a[0], a[1], a[2]);
3721    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3722        return do_bind(a[0], a[1], a[2]);
3723    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3724        return do_connect(a[0], a[1], a[2]);
3725    case TARGET_SYS_LISTEN: /* sockfd, backlog */
3726        return get_errno(listen(a[0], a[1]));
3727    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3728        return do_accept4(a[0], a[1], a[2], 0);
3729    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3730        return do_getsockname(a[0], a[1], a[2]);
3731    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3732        return do_getpeername(a[0], a[1], a[2]);
3733    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3734        return do_socketpair(a[0], a[1], a[2], a[3]);
3735    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3736        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3737    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3738        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3739    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3740        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3741    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3742        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3743    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3744        return get_errno(shutdown(a[0], a[1]));
3745    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3746        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3747    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3748        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3749    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3750        return do_sendrecvmsg(a[0], a[1], a[2], 1);
3751    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3752        return do_sendrecvmsg(a[0], a[1], a[2], 0);
3753    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3754        return do_accept4(a[0], a[1], a[2], a[3]);
3755    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3756        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3757    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3758        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3759    default:
3760        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3761        return -TARGET_EINVAL;
3762    }
3763}
3764#endif
3765
3766#define N_SHM_REGIONS   32
3767
3768static struct shm_region {
3769    abi_ulong start;
3770    abi_ulong size;
3771    bool in_use;
3772} shm_regions[N_SHM_REGIONS];
3773
3774#ifndef TARGET_SEMID64_DS
3775/* asm-generic version of this struct */
3776struct target_semid64_ds
3777{
3778  struct target_ipc_perm sem_perm;
3779  abi_ulong sem_otime;
3780#if TARGET_ABI_BITS == 32
3781  abi_ulong __unused1;
3782#endif
3783  abi_ulong sem_ctime;
3784#if TARGET_ABI_BITS == 32
3785  abi_ulong __unused2;
3786#endif
3787  abi_ulong sem_nsems;
3788  abi_ulong __unused3;
3789  abi_ulong __unused4;
3790};
3791#endif
3792
3793static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3794                                               abi_ulong target_addr)
3795{
3796    struct target_ipc_perm *target_ip;
3797    struct target_semid64_ds *target_sd;
3798
3799    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3800        return -TARGET_EFAULT;
3801    target_ip = &(target_sd->sem_perm);
3802    host_ip->__key = tswap32(target_ip->__key);
3803    host_ip->uid = tswap32(target_ip->uid);
3804    host_ip->gid = tswap32(target_ip->gid);
3805    host_ip->cuid = tswap32(target_ip->cuid);
3806    host_ip->cgid = tswap32(target_ip->cgid);
3807#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3808    host_ip->mode = tswap32(target_ip->mode);
3809#else
3810    host_ip->mode = tswap16(target_ip->mode);
3811#endif
3812#if defined(TARGET_PPC)
3813    host_ip->__seq = tswap32(target_ip->__seq);
3814#else
3815    host_ip->__seq = tswap16(target_ip->__seq);
3816#endif
3817    unlock_user_struct(target_sd, target_addr, 0);
3818    return 0;
3819}
3820
3821static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3822                                               struct ipc_perm *host_ip)
3823{
3824    struct target_ipc_perm *target_ip;
3825    struct target_semid64_ds *target_sd;
3826
3827    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3828        return -TARGET_EFAULT;
3829    target_ip = &(target_sd->sem_perm);
3830    target_ip->__key = tswap32(host_ip->__key);
3831    target_ip->uid = tswap32(host_ip->uid);
3832    target_ip->gid = tswap32(host_ip->gid);
3833    target_ip->cuid = tswap32(host_ip->cuid);
3834    target_ip->cgid = tswap32(host_ip->cgid);
3835#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3836    target_ip->mode = tswap32(host_ip->mode);
3837#else
3838    target_ip->mode = tswap16(host_ip->mode);
3839#endif
3840#if defined(TARGET_PPC)
3841    target_ip->__seq = tswap32(host_ip->__seq);
3842#else
3843    target_ip->__seq = tswap16(host_ip->__seq);
3844#endif
3845    unlock_user_struct(target_sd, target_addr, 1);
3846    return 0;
3847}
3848
3849static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3850                                               abi_ulong target_addr)
3851{
3852    struct target_semid64_ds *target_sd;
3853
3854    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3855        return -TARGET_EFAULT;
3856    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3857        return -TARGET_EFAULT;
3858    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3859    host_sd->sem_otime = tswapal(target_sd->sem_otime);
3860    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3861    unlock_user_struct(target_sd, target_addr, 0);
3862    return 0;
3863}
3864
3865static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3866                                               struct semid_ds *host_sd)
3867{
3868    struct target_semid64_ds *target_sd;
3869
3870    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3871        return -TARGET_EFAULT;
3872    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3873        return -TARGET_EFAULT;
3874    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3875    target_sd->sem_otime = tswapal(host_sd->sem_otime);
3876    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3877    unlock_user_struct(target_sd, target_addr, 1);
3878    return 0;
3879}
3880
3881struct target_seminfo {
3882    int semmap;
3883    int semmni;
3884    int semmns;
3885    int semmnu;
3886    int semmsl;
3887    int semopm;
3888    int semume;
3889    int semusz;
3890    int semvmx;
3891    int semaem;
3892};
3893
3894static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3895                                              struct seminfo *host_seminfo)
3896{
3897    struct target_seminfo *target_seminfo;
3898    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3899        return -TARGET_EFAULT;
3900    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3901    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3902    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3903    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3904    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3905    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3906    __put_user(host_seminfo->semume, &target_seminfo->semume);
3907    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3908    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3909    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3910    unlock_user_struct(target_seminfo, target_addr, 1);
3911    return 0;
3912}
3913
3914union semun {
3915        int val;
3916        struct semid_ds *buf;
3917        unsigned short *array;
3918        struct seminfo *__buf;
3919};
3920
3921union target_semun {
3922        int val;
3923        abi_ulong buf;
3924        abi_ulong array;
3925        abi_ulong __buf;
3926};
3927
3928static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3929                                               abi_ulong target_addr)
3930{
3931    int nsems;
3932    unsigned short *array;
3933    union semun semun;
3934    struct semid_ds semid_ds;
3935    int i, ret;
3936
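        /*
         * GETALL/SETALL carry no element count, so query the semaphore set
         * with IPC_STAT first to learn how many values to copy.
         */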
3937    semun.buf = &semid_ds;
3938
3939    ret = semctl(semid, 0, IPC_STAT, semun);
3940    if (ret == -1)
3941        return get_errno(ret);
3942
3943    nsems = semid_ds.sem_nsems;
3944
3945    *host_array = g_try_new(unsigned short, nsems);
3946    if (!*host_array) {
3947        return -TARGET_ENOMEM;
3948    }
3949    array = lock_user(VERIFY_READ, target_addr,
3950                      nsems*sizeof(unsigned short), 1);
3951    if (!array) {
3952        g_free(*host_array);
3953        return -TARGET_EFAULT;
3954    }
3955
3956    for (i = 0; i < nsems; i++) {
3957        __get_user((*host_array)[i], &array[i]);
3958    }
3959    unlock_user(array, target_addr, 0);
3960
3961    return 0;
3962}
3963
3964static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3965                                               unsigned short **host_array)
3966{
3967    int nsems;
3968    unsigned short *array;
3969    union semun semun;
3970    struct semid_ds semid_ds;
3971    int i, ret;
3972
3973    semun.buf = &semid_ds;
3974
3975    ret = semctl(semid, 0, IPC_STAT, semun);
3976    if (ret == -1)
3977        return get_errno(ret);
3978
3979    nsems = semid_ds.sem_nsems;
3980
3981    array = lock_user(VERIFY_WRITE, target_addr,
3982                      nsems*sizeof(unsigned short), 0);
3983    if (!array)
3984        return -TARGET_EFAULT;
3985
3986    for (i = 0; i < nsems; i++) {
3987        __put_user((*host_array)[i], &array[i]);
3988    }
3989    g_free(*host_array);
3990    unlock_user(array, target_addr, 1);
3991
3992    return 0;
3993}
3994
3995static inline abi_long do_semctl(int semid, int semnum, int cmd,
3996                                 abi_ulong target_arg)
3997{
3998    union target_semun target_su = { .buf = target_arg };
3999    union semun arg;
4000    struct semid_ds dsarg;
4001    unsigned short *array = NULL;
4002    struct seminfo seminfo;
4003    abi_long ret = -TARGET_EINVAL;
4004    abi_long err;
4005    cmd &= 0xff;
4006
4007    switch (cmd) {
4008        case GETVAL:
4009        case SETVAL:
4010            /* In 64 bit cross-endian situations, we will erroneously pick up
4011             * the wrong half of the union for the "val" element.  To rectify
4012             * this, the entire 8-byte structure is byteswapped, followed by
4013             * a swap of the 4 byte val field. In other cases, the data is
4014             * already in proper host byte order. */
4015            if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4016                target_su.buf = tswapal(target_su.buf);
4017                arg.val = tswap32(target_su.val);
4018            } else {
4019                arg.val = target_su.val;
4020            }
4021            ret = get_errno(semctl(semid, semnum, cmd, arg));
4022            break;
4023        case GETALL:
4024        case SETALL:
4025            err = target_to_host_semarray(semid, &array, target_su.array);
4026            if (err)
4027                return err;
4028            arg.array = array;
4029            ret = get_errno(semctl(semid, semnum, cmd, arg));
4030            err = host_to_target_semarray(semid, target_su.array, &array);
4031            if (err)
4032                return err;
4033            break;
4034        case IPC_STAT:
4035        case IPC_SET:
4036        case SEM_STAT:
4037            err = target_to_host_semid_ds(&dsarg, target_su.buf);
4038            if (err)
4039                return err;
4040            arg.buf = &dsarg;
4041            ret = get_errno(semctl(semid, semnum, cmd, arg));
4042            err = host_to_target_semid_ds(target_su.buf, &dsarg);
4043            if (err)
4044                return err;
4045            break;
4046        case IPC_INFO:
4047        case SEM_INFO:
4048            arg.__buf = &seminfo;
4049            ret = get_errno(semctl(semid, semnum, cmd, arg));
4050            err = host_to_target_seminfo(target_su.__buf, &seminfo);
4051            if (err)
4052                return err;
4053            break;
4054        case IPC_RMID:
4055        case GETPID:
4056        case GETNCNT:
4057        case GETZCNT:
4058            ret = get_errno(semctl(semid, semnum, cmd, NULL));
4059            break;
4060    }
4061
4062    return ret;
4063}
4064
4065struct target_sembuf {
4066    unsigned short sem_num;
4067    short sem_op;
4068    short sem_flg;
4069};
4070
4071static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4072                                             abi_ulong target_addr,
4073                                             unsigned nsops)
4074{
4075    struct target_sembuf *target_sembuf;
4076    int i;
4077
4078    target_sembuf = lock_user(VERIFY_READ, target_addr,
4079                              nsops*sizeof(struct target_sembuf), 1);
4080    if (!target_sembuf)
4081        return -TARGET_EFAULT;
4082
4083    for (i = 0; i < nsops; i++) {
4084        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4085        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4086        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4087    }
4088
4089    unlock_user(target_sembuf, target_addr, 0);
4090
4091    return 0;
4092}
4093
4094#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4095    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4096
4097/*
4098 * This macro is required to handle the s390 variant, which passes the
4099 * arguments in a different order than the default.
4100 */
4101#ifdef __s390x__
4102#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4103  (__nsops), (__timeout), (__sops)
4104#else
4105#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4106  (__nsops), 0, (__sops), (__timeout)
4107#endif
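    /*
     * For example, SEMTIMEDOP_IPC_ARGS(nsops, sops, ts) expands to
     * "nsops, 0, sops, ts" for the default variant but to "nsops, ts, sops"
     * on s390x, matching its five-parameter sys_ipc.
     */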
4108
4109static inline abi_long do_semtimedop(int semid,
4110                                     abi_long ptr,
4111                                     unsigned nsops,
4112                                     abi_long timeout, bool time64)
4113{
4114    struct sembuf *sops;
4115    struct timespec ts, *pts = NULL;
4116    abi_long ret;
4117
4118    if (timeout) {
4119        pts = &ts;
4120        if (time64) {
4121            if (target_to_host_timespec64(pts, timeout)) {
4122                return -TARGET_EFAULT;
4123            }
4124        } else {
4125            if (target_to_host_timespec(pts, timeout)) {
4126                return -TARGET_EFAULT;
4127            }
4128        }
4129    }
4130
4131    if (nsops > TARGET_SEMOPM) {
4132        return -TARGET_E2BIG;
4133    }
4134
4135    sops = g_new(struct sembuf, nsops);
4136
4137    if (target_to_host_sembuf(sops, ptr, nsops)) {
4138        g_free(sops);
4139        return -TARGET_EFAULT;
4140    }
4141
4142    ret = -TARGET_ENOSYS;
4143#ifdef __NR_semtimedop
4144    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4145#endif
4146#ifdef __NR_ipc
4147    if (ret == -TARGET_ENOSYS) {
4148        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4149                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4150    }
4151#endif
4152    g_free(sops);
4153    return ret;
4154}
4155#endif
4156
4157struct target_msqid_ds
4158{
4159    struct target_ipc_perm msg_perm;
4160    abi_ulong msg_stime;
4161#if TARGET_ABI_BITS == 32
4162    abi_ulong __unused1;
4163#endif
4164    abi_ulong msg_rtime;
4165#if TARGET_ABI_BITS == 32
4166    abi_ulong __unused2;
4167#endif
4168    abi_ulong msg_ctime;
4169#if TARGET_ABI_BITS == 32
4170    abi_ulong __unused3;
4171#endif
4172    abi_ulong __msg_cbytes;
4173    abi_ulong msg_qnum;
4174    abi_ulong msg_qbytes;
4175    abi_ulong msg_lspid;
4176    abi_ulong msg_lrpid;
4177    abi_ulong __unused4;
4178    abi_ulong __unused5;
4179};
4180
4181static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4182                                               abi_ulong target_addr)
4183{
4184    struct target_msqid_ds *target_md;
4185
4186    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4187        return -TARGET_EFAULT;
4188    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4189        return -TARGET_EFAULT;
4190    host_md->msg_stime = tswapal(target_md->msg_stime);
4191    host_md->msg_rtime = tswapal(target_md->msg_rtime);
4192    host_md->msg_ctime = tswapal(target_md->msg_ctime);
4193    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4194    host_md->msg_qnum = tswapal(target_md->msg_qnum);
4195    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4196    host_md->msg_lspid = tswapal(target_md->msg_lspid);
4197    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4198    unlock_user_struct(target_md, target_addr, 0);
4199    return 0;
4200}
4201
4202static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4203                                               struct msqid_ds *host_md)
4204{
4205    struct target_msqid_ds *target_md;
4206
4207    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4208        return -TARGET_EFAULT;
4209    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4210        return -TARGET_EFAULT;
4211    target_md->msg_stime = tswapal(host_md->msg_stime);
4212    target_md->msg_rtime = tswapal(host_md->msg_rtime);
4213    target_md->msg_ctime = tswapal(host_md->msg_ctime);
4214    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4215    target_md->msg_qnum = tswapal(host_md->msg_qnum);
4216    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4217    target_md->msg_lspid = tswapal(host_md->msg_lspid);
4218    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4219    unlock_user_struct(target_md, target_addr, 1);
4220    return 0;
4221}
4222
4223struct target_msginfo {
4224    int msgpool;
4225    int msgmap;
4226    int msgmax;
4227    int msgmnb;
4228    int msgmni;
4229    int msgssz;
4230    int msgtql;
4231    unsigned short int msgseg;
4232};
4233
4234static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4235                                              struct msginfo *host_msginfo)
4236{
4237    struct target_msginfo *target_msginfo;
4238    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4239        return -TARGET_EFAULT;
4240    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4241    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4242    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4243    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4244    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4245    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4246    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4247    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4248    unlock_user_struct(target_msginfo, target_addr, 1);
4249    return 0;
4250}
4251
4252static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4253{
4254    struct msqid_ds dsarg;
4255    struct msginfo msginfo;
4256    abi_long ret = -TARGET_EINVAL;
4257
4258    cmd &= 0xff;
4259
4260    switch (cmd) {
4261    case IPC_STAT:
4262    case IPC_SET:
4263    case MSG_STAT:
4264        if (target_to_host_msqid_ds(&dsarg,ptr))
4265            return -TARGET_EFAULT;
4266        ret = get_errno(msgctl(msgid, cmd, &dsarg));
4267        if (host_to_target_msqid_ds(ptr,&dsarg))
4268            return -TARGET_EFAULT;
4269        break;
4270    case IPC_RMID:
4271        ret = get_errno(msgctl(msgid, cmd, NULL));
4272        break;
4273    case IPC_INFO:
4274    case MSG_INFO:
4275        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4276        if (host_to_target_msginfo(ptr, &msginfo))
4277            return -TARGET_EFAULT;
4278        break;
4279    }
4280
4281    return ret;
4282}
4283
4284struct target_msgbuf {
4285    abi_long mtype;
4286    char        mtext[1];
4287};
4288
4289static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4290                                 ssize_t msgsz, int msgflg)
4291{
4292    struct target_msgbuf *target_mb;
4293    struct msgbuf *host_mb;
4294    abi_long ret = 0;
4295
4296    if (msgsz < 0) {
4297        return -TARGET_EINVAL;
4298    }
4299
4300    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4301        return -TARGET_EFAULT;
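        /* The host msgbuf is a long mtype followed by the message text. */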
4302    host_mb = g_try_malloc(msgsz + sizeof(long));
4303    if (!host_mb) {
4304        unlock_user_struct(target_mb, msgp, 0);
4305        return -TARGET_ENOMEM;
4306    }
4307    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4308    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4309    ret = -TARGET_ENOSYS;
4310#ifdef __NR_msgsnd
4311    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4312#endif
4313#ifdef __NR_ipc
4314    if (ret == -TARGET_ENOSYS) {
4315#ifdef __s390x__
4316        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4317                                 host_mb));
4318#else
4319        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4320                                 host_mb, 0));
4321#endif
4322    }
4323#endif
4324    g_free(host_mb);
4325    unlock_user_struct(target_mb, msgp, 0);
4326
4327    return ret;
4328}
4329
4330#ifdef __NR_ipc
4331#if defined(__sparc__)
4332/* For msgrcv, SPARC does not use the kludge on the final 2 arguments.  */
4333#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4334#elif defined(__s390x__)
4335/* The s390 sys_ipc variant has only five parameters.  */
4336#define MSGRCV_ARGS(__msgp, __msgtyp) \
4337    ((long int[]){(long int)__msgp, __msgtyp})
4338#else
4339#define MSGRCV_ARGS(__msgp, __msgtyp) \
4340    ((long int[]){(long int)__msgp, __msgtyp}), 0
4341#endif
4342#endif
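    /*
     * For example, MSGRCV_ARGS(mb, type) passes the two values directly on
     * SPARC, while the other hosts pack them into a two-element array that
     * stands in for the kernel's struct ipc_kludge; the default variant
     * additionally appends a 0 argument that s390x's five-parameter sys_ipc
     * does not take.
     */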
4343
4344static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4345                                 ssize_t msgsz, abi_long msgtyp,
4346                                 int msgflg)
4347{
4348    struct target_msgbuf *target_mb;
4349    char *target_mtext;
4350    struct msgbuf *host_mb;
4351    abi_long ret = 0;
4352
4353    if (msgsz < 0) {
4354        return -TARGET_EINVAL;
4355    }
4356
4357    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4358        return -TARGET_EFAULT;
4359
4360    host_mb = g_try_malloc(msgsz + sizeof(long));
4361    if (!host_mb) {
4362        ret = -TARGET_ENOMEM;
4363        goto end;
4364    }
4365    ret = -TARGET_ENOSYS;
4366#ifdef __NR_msgrcv
4367    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4368#endif
4369#ifdef __NR_ipc
4370    if (ret == -TARGET_ENOSYS) {
4371        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4372                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4373    }
4374#endif
4375
4376    if (ret > 0) {
4377        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4378        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4379        if (!target_mtext) {
4380            ret = -TARGET_EFAULT;
4381            goto end;
4382        }
4383        memcpy(target_mb->mtext, host_mb->mtext, ret);
4384        unlock_user(target_mtext, target_mtext_addr, ret);
4385    }
4386
4387    target_mb->mtype = tswapal(host_mb->mtype);
4388
4389end:
4390    if (target_mb)
4391        unlock_user_struct(target_mb, msgp, 1);
4392    g_free(host_mb);
4393    return ret;
4394}
4395
4396static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4397                                               abi_ulong target_addr)
4398{
4399    struct target_shmid_ds *target_sd;
4400
4401    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4402        return -TARGET_EFAULT;
4403    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4404        return -TARGET_EFAULT;
4405    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4406    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4407    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4408    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4409    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4410    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4411    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4412    unlock_user_struct(target_sd, target_addr, 0);
4413    return 0;
4414}
4415
4416static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4417                                               struct shmid_ds *host_sd)
4418{
4419    struct target_shmid_ds *target_sd;
4420
4421    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4422        return -TARGET_EFAULT;
4423    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4424        return -TARGET_EFAULT;
4425    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4426    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4427    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4428    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4429    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4430    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4431    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4432    unlock_user_struct(target_sd, target_addr, 1);
4433    return 0;
4434}
4435
4436struct  target_shminfo {
4437    abi_ulong shmmax;
4438    abi_ulong shmmin;
4439    abi_ulong shmmni;
4440    abi_ulong shmseg;
4441    abi_ulong shmall;
4442};
4443
4444static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4445                                              struct shminfo *host_shminfo)
4446{
4447    struct target_shminfo *target_shminfo;
4448    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4449        return -TARGET_EFAULT;
4450    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4451    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4452    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4453    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4454    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4455    unlock_user_struct(target_shminfo, target_addr, 1);
4456    return 0;
4457}
4458
4459struct target_shm_info {
4460    int used_ids;
4461    abi_ulong shm_tot;
4462    abi_ulong shm_rss;
4463    abi_ulong shm_swp;
4464    abi_ulong swap_attempts;
4465    abi_ulong swap_successes;
4466};
4467
4468static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4469                                               struct shm_info *host_shm_info)
4470{
4471    struct target_shm_info *target_shm_info;
4472    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4473        return -TARGET_EFAULT;
4474    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4475    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4476    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4477    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4478    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4479    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4480    unlock_user_struct(target_shm_info, target_addr, 1);
4481    return 0;
4482}
4483
4484static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4485{
4486    struct shmid_ds dsarg;
4487    struct shminfo shminfo;
4488    struct shm_info shm_info;
4489    abi_long ret = -TARGET_EINVAL;
4490
4491    cmd &= 0xff;
4492
4493    switch (cmd) {
4494    case IPC_STAT:
4495    case IPC_SET:
4496    case SHM_STAT:
4497        if (target_to_host_shmid_ds(&dsarg, buf))
4498            return -TARGET_EFAULT;
4499        ret = get_errno(shmctl(shmid, cmd, &dsarg));
4500        if (host_to_target_shmid_ds(buf, &dsarg))
4501            return -TARGET_EFAULT;
4502        break;
4503    case IPC_INFO:
4504        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4505        if (host_to_target_shminfo(buf, &shminfo))
4506            return -TARGET_EFAULT;
4507        break;
4508    case SHM_INFO:
4509        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4510        if (host_to_target_shm_info(buf, &shm_info))
4511            return -TARGET_EFAULT;
4512        break;
4513    case IPC_RMID:
4514    case SHM_LOCK:
4515    case SHM_UNLOCK:
4516        ret = get_errno(shmctl(shmid, cmd, NULL));
4517        break;
4518    }
4519
4520    return ret;
4521}
4522
4523#ifndef TARGET_FORCE_SHMLBA
4524/* For most architectures, SHMLBA is the same as the page size;
4525 * some architectures have larger values, in which case they should
4526 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4527 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4528 * and defining its own value for SHMLBA.
4529 *
4530 * The kernel also permits SHMLBA to be set by the architecture to a
4531 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4532 * this means that addresses are rounded to the large size if
4533 * SHM_RND is set but addresses not aligned to that size are not rejected
4534 * as long as they are at least page-aligned. Since the only architecture
4535 * which uses this is ia64, this code doesn't provide for that oddity.
4536 */
4537static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4538{
4539    return TARGET_PAGE_SIZE;
4540}
4541#endif
4542
4543static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4544                                 int shmid, abi_ulong shmaddr, int shmflg)
4545{
4546    CPUState *cpu = env_cpu(cpu_env);
4547    abi_long raddr;
4548    void *host_raddr;
4549    struct shmid_ds shm_info;
4550    int i, ret;
4551    abi_ulong shmlba;
4552
4553    /* shmat pointers are always untagged */
4554
4555    /* find out the length of the shared memory segment */
4556    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4557    if (is_error(ret)) {
4558        /* can't get length, bail out */
4559        return ret;
4560    }
4561
4562    shmlba = target_shmlba(cpu_env);
4563
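        /*
         * As the kernel's shmat() does, round an unaligned attach address
         * down to an SHMLBA boundary when SHM_RND is given and reject it
         * otherwise.
         */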
4564    if (shmaddr & (shmlba - 1)) {
4565        if (shmflg & SHM_RND) {
4566            shmaddr &= ~(shmlba - 1);
4567        } else {
4568            return -TARGET_EINVAL;
4569        }
4570    }
4571    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4572        return -TARGET_EINVAL;
4573    }
4574
4575    mmap_lock();
4576
4577    /*
4578     * We're mapping shared memory, so ensure we generate code for parallel
4579     * execution and flush old translations.  This will work up to the level
4580     * supported by the host -- anything that requires EXCP_ATOMIC will not
4581     * be atomic with respect to an external process.
4582     */
4583    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4584        cpu->tcg_cflags |= CF_PARALLEL;
4585        tb_flush(cpu);
4586    }
4587
4588    if (shmaddr)
4589        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4590    else {
4591        abi_ulong mmap_start;
4592
4593        /* In order to use the host shmat, we need to honor host SHMLBA.  */
4594        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4595
4596        if (mmap_start == -1) {
4597            errno = ENOMEM;
4598            host_raddr = (void *)-1;
4599        } else
4600            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4601                               shmflg | SHM_REMAP);
4602    }
4603
4604    if (host_raddr == (void *)-1) {
4605        mmap_unlock();
4606        return get_errno((long)host_raddr);
4607    }
4608    raddr = h2g((unsigned long)host_raddr);
4609
4610    page_set_flags(raddr, raddr + shm_info.shm_segsz,
4611                   PAGE_VALID | PAGE_RESET | PAGE_READ |
4612                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4613
4614    for (i = 0; i < N_SHM_REGIONS; i++) {
4615        if (!shm_regions[i].in_use) {
4616            shm_regions[i].in_use = true;
4617            shm_regions[i].start = raddr;
4618            shm_regions[i].size = shm_info.shm_segsz;
4619            break;
4620        }
4621    }
4622
4623    mmap_unlock();
4624    return raddr;
4625
4626}
4627
4628static inline abi_long do_shmdt(abi_ulong shmaddr)
4629{
4630    int i;
4631    abi_long rv;
4632
4633    /* shmdt pointers are always untagged */
4634
4635    mmap_lock();
4636
4637    for (i = 0; i < N_SHM_REGIONS; ++i) {
4638        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4639            shm_regions[i].in_use = false;
4640            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4641            break;
4642        }
4643    }
4644    rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4645
4646    mmap_unlock();
4647
4648    return rv;
4649}
4650
4651#ifdef TARGET_NR_ipc
4652/* ??? This only works with linear mappings.  */
4653/* do_ipc() must return target values and target errnos. */
4654static abi_long do_ipc(CPUArchState *cpu_env,
4655                       unsigned int call, abi_long first,
4656                       abi_long second, abi_long third,
4657                       abi_long ptr, abi_long fifth)
4658{
4659    int version;
4660    abi_long ret = 0;
4661
4662    version = call >> 16;
4663    call &= 0xffff;
4664
4665    switch (call) {
4666    case IPCOP_semop:
4667        ret = do_semtimedop(first, ptr, second, 0, false);
4668        break;
4669    case IPCOP_semtimedop:
4670    /*
4671     * The s390 sys_ipc variant has only five parameters instead of six
4672     * (as in the default variant); the only difference is the handling of
4673     * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4674     * to a struct timespec while the generic variant uses the fifth parameter.
4675     */
4676#if defined(TARGET_S390X)
4677        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4678#else
4679        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4680#endif
4681        break;
4682
4683    case IPCOP_semget:
4684        ret = get_errno(semget(first, second, third));
4685        break;
4686
4687    case IPCOP_semctl: {
4688        /* The semun argument to semctl is passed by value, so dereference the
4689         * ptr argument. */
4690        abi_ulong atptr;
4691        get_user_ual(atptr, ptr);
4692        ret = do_semctl(first, second, third, atptr);
4693        break;
4694    }
4695
4696    case IPCOP_msgget:
4697        ret = get_errno(msgget(first, second));
4698        break;
4699
4700    case IPCOP_msgsnd:
4701        ret = do_msgsnd(first, ptr, second, third);
4702        break;
4703
4704    case IPCOP_msgctl:
4705        ret = do_msgctl(first, second, ptr);
4706        break;
4707
4708    case IPCOP_msgrcv:
4709        switch (version) {
4710        case 0:
4711            {
4712                struct target_ipc_kludge {
4713                    abi_long msgp;
4714                    abi_long msgtyp;
4715                } *tmp;
4716
4717                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4718                    ret = -TARGET_EFAULT;
4719                    break;
4720                }
4721
4722                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4723
4724                unlock_user_struct(tmp, ptr, 0);
4725                break;
4726            }
4727        default:
4728            ret = do_msgrcv(first, ptr, second, fifth, third);
4729        }
4730        break;
4731
4732    case IPCOP_shmat:
4733        switch (version) {
4734        default:
4735        {
4736            abi_ulong raddr;
4737            raddr = do_shmat(cpu_env, first, ptr, second);
4738            if (is_error(raddr))
4739                return get_errno(raddr);
4740            if (put_user_ual(raddr, third))
4741                return -TARGET_EFAULT;
4742            break;
4743        }
4744        case 1:
4745            ret = -TARGET_EINVAL;
4746            break;
4747        }
4748        break;
4749    case IPCOP_shmdt:
4750        ret = do_shmdt(ptr);
4751        break;
4752
4753    case IPCOP_shmget:
4754        /* IPC_* flag values are the same on all linux platforms */
4755        ret = get_errno(shmget(first, second, third));
4756        break;
4757
4758        /* IPC_* and SHM_* command values are the same on all linux platforms */
4759    case IPCOP_shmctl:
4760        ret = do_shmctl(first, second, ptr);
4761        break;
4762    default:
4763        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4764                      call, version);
4765        ret = -TARGET_ENOSYS;
4766        break;
4767    }
4768    return ret;
4769}
4770#endif
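
/*
 * Illustrative sketch of how a guest usually reaches do_ipc(): on targets
 * that only provide the multiplexed ipc syscall, the C library packs the
 * operation into 'call' (with an optional version number in the top 16
 * bits) and the remaining arguments into first..fifth.  The wrapper shown
 * here is hypothetical and libc-specific, but a guest shmat(shmid, NULL, 0)
 * typically arrives as roughly
 *
 *   syscall(__NR_ipc, IPCOP_shmat, shmid, 0, &raddr, NULL);
 *
 * i.e. first = shmid, second = shmflg, third = &raddr and ptr = shmaddr,
 * so do_ipc() sees version == 0 and call == IPCOP_shmat, maps the segment
 * with do_shmat(), and writes the attach address back through 'third'
 * with put_user_ual() before returning 0.
 */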
4771
4772/* kernel structure types definitions */
4773
4774#define STRUCT(name, ...) STRUCT_ ## name,
4775#define STRUCT_SPECIAL(name) STRUCT_ ## name,
4776enum {
4777#include "syscall_types.h"
4778STRUCT_MAX
4779};
4780#undef STRUCT
4781#undef STRUCT_SPECIAL
4782
4783#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4784#define STRUCT_SPECIAL(name)
4785#include "syscall_types.h"
4786#undef STRUCT
4787#undef STRUCT_SPECIAL
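
/*
 * Illustrative expansion of the two passes over syscall_types.h above.
 * Assuming it contains an entry such as (hypothetical example)
 *
 *   STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * the first include adds an enumerator
 *
 *   STRUCT_winsize,
 *
 * and the second include emits the matching field descriptor
 *
 *   static const argtype struct_winsize_def[] = {
 *       TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 *
 * which the thunk code later reaches through MK_STRUCT(STRUCT_winsize)
 * when converting such a struct between target and host layouts.
 */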
4788
4789#define MAX_STRUCT_SIZE 4096
4790
4791#ifdef CONFIG_FIEMAP
4792/* So fiemap access checks don't overflow on 32 bit systems.
4793 * This is very slightly smaller than the limit imposed by
4794 * the underlying kernel.
4795 */
4796#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4797                            / sizeof(struct fiemap_extent))
4798
4799static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4800                                       int fd, int cmd, abi_long arg)
4801{
4802    /* The parameter for this ioctl is a struct fiemap followed
4803     * by an array of struct fiemap_extent whose size is set
4804     * in fiemap->fm_extent_count. The array is filled in by the
4805     * ioctl.
4806     */
4807    int target_size_in, target_size_out;
4808    struct fiemap *fm;
4809    const argtype *arg_type = ie->arg_type;
4810    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4811    void *argptr, *p;
4812    abi_long ret;
4813    int i, extent_size = thunk_type_size(extent_arg_type, 0);
4814    uint32_t outbufsz;
4815    int free_fm = 0;
4816
4817    assert(arg_type[0] == TYPE_PTR);
4818    assert(ie->access == IOC_RW);
4819    arg_type++;
4820    target_size_in = thunk_type_size(arg_type, 0);
4821    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4822    if (!argptr) {
4823        return -TARGET_EFAULT;
4824    }
4825    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4826    unlock_user(argptr, arg, 0);
4827    fm = (struct fiemap *)buf_temp;
4828    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4829        return -TARGET_EINVAL;
4830    }
4831
4832    outbufsz = sizeof (*fm) +
4833        (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4834
4835    if (outbufsz > MAX_STRUCT_SIZE) {
4836        /* We can't fit all the extents into the fixed size buffer.
4837         * Allocate one that is large enough and use it instead.
4838         */
4839        fm = g_try_malloc(outbufsz);
4840        if (!fm) {
4841            return -TARGET_ENOMEM;
4842        }
4843        memcpy(fm, buf_temp, sizeof(struct fiemap));
4844        free_fm = 1;
4845    }
4846    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4847    if (!is_error(ret)) {
4848        target_size_out = target_size_in;
4849        /* An extent_count of 0 means we were only counting the extents
4850         * so there are no structs to copy
4851         */
4852        if (fm->fm_extent_count != 0) {
4853            target_size_out += fm->fm_mapped_extents * extent_size;
4854        }
4855        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4856        if (!argptr) {
4857            ret = -TARGET_EFAULT;
4858        } else {
4859            /* Convert the struct fiemap */
4860            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4861            if (fm->fm_extent_count != 0) {
4862                p = argptr + target_size_in;
4863                /* ...and then all the struct fiemap_extents */
4864                for (i = 0; i < fm->fm_mapped_extents; i++) {
4865                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4866                                  THUNK_TARGET);
4867                    p += extent_size;
4868                }
4869            }
4870            unlock_user(argptr, arg, target_size_out);
4871        }
4872    }
4873    if (free_fm) {
4874        g_free(fm);
4875    }
4876    return ret;
4877}
4878#endif
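
/*
 * For reference, the FS_IOC_FIEMAP argument handled above is laid out as a
 * fixed header followed by a variable-length extent array:
 *
 *   struct fiemap                             (fm_extent_count set by guest)
 *   struct fiemap_extent[fm_extent_count]     (filled in by the ioctl)
 *
 * which is why the host buffer is sized as
 *
 *   outbufsz = sizeof(struct fiemap)
 *            + fm_extent_count * sizeof(struct fiemap_extent);
 *
 * Anything beyond MAX_STRUCT_SIZE is g_try_malloc'ed instead of using
 * buf_temp, and on success only fm_mapped_extents entries are converted
 * back to the guest (none at all if the guest passed fm_extent_count == 0
 * to merely count extents).
 */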
4879
4880static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4881                                int fd, int cmd, abi_long arg)
4882{
4883    const argtype *arg_type = ie->arg_type;
4884    int target_size;
4885    void *argptr;
4886    int ret;
4887    struct ifconf *host_ifconf;
4888    uint32_t outbufsz;
4889    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4890    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4891    int target_ifreq_size;
4892    int nb_ifreq;
4893    int free_buf = 0;
4894    int i;
4895    int target_ifc_len;
4896    abi_long target_ifc_buf;
4897    int host_ifc_len;
4898    char *host_ifc_buf;
4899
4900    assert(arg_type[0] == TYPE_PTR);
4901    assert(ie->access == IOC_RW);
4902
4903    arg_type++;
4904    target_size = thunk_type_size(arg_type, 0);
4905
4906    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4907    if (!argptr)
4908        return -TARGET_EFAULT;
4909    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4910    unlock_user(argptr, arg, 0);
4911
4912    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4913    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4914    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4915
4916    if (target_ifc_buf != 0) {
4917        target_ifc_len = host_ifconf->ifc_len;
4918        nb_ifreq = target_ifc_len / target_ifreq_size;
4919        host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4920
4921        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4922        if (outbufsz > MAX_STRUCT_SIZE) {
4923            /*
4924             * We can't fit all the ifreq entries into the fixed size buffer.
4925             * Allocate one that is large enough and use it instead.
4926             */
4927            host_ifconf = g_try_malloc(outbufsz);
4928            if (!host_ifconf) {
4929                return -TARGET_ENOMEM;
4930            }
4931            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4932            free_buf = 1;
4933        }
4934        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4935
4936        host_ifconf->ifc_len = host_ifc_len;
4937    } else {
4938        host_ifc_buf = NULL;
4939    }
4940    host_ifconf->ifc_buf = host_ifc_buf;
4941
4942    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4943    if (!is_error(ret)) {
4944        /* convert host ifc_len to target ifc_len */
4945
4946        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4947        target_ifc_len = nb_ifreq * target_ifreq_size;
4948        host_ifconf->ifc_len = target_ifc_len;
4949
4950        /* restore target ifc_buf */
4951
4952        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4953
4954        /* copy struct ifconf to target user */
4955
4956        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4957        if (!argptr)
4958            return -TARGET_EFAULT;
4959        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4960        unlock_user(argptr, arg, target_size);
4961
4962        if (target_ifc_buf != 0) {
4963            /* copy ifreq[] to target user */
4964            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4965            for (i = 0; i < nb_ifreq ; i++) {
4966                thunk_convert(argptr + i * target_ifreq_size,
4967                              host_ifc_buf + i * sizeof(struct ifreq),
4968                              ifreq_arg_type, THUNK_TARGET);
4969            }
4970            unlock_user(argptr, target_ifc_buf, target_ifc_len);
4971        }
4972    }
4973
4974    if (free_buf) {
4975        g_free(host_ifconf);
4976    }
4977
4978    return ret;
4979}
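
/*
 * Sketch of the SIOCGIFCONF length handling above: the guest sizes ifc_buf
 * in target "struct ifreq" units while the host kernel works in host-sized
 * records, so the two byte counts are related through the record count:
 *
 *   nb_ifreq       = target_ifc_len / target_ifreq_size;
 *   host_ifc_len   = nb_ifreq * sizeof(struct ifreq);     // before ioctl
 *   ...
 *   nb_ifreq       = host_ifconf->ifc_len / sizeof(struct ifreq);
 *   target_ifc_len = nb_ifreq * target_ifreq_size;        // after ioctl
 *
 * Each record is then converted individually with thunk_convert(), so the
 * guest always sees whole target-layout ifreq entries even when the host
 * and target struct sizes differ.
 */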
4980
4981#if defined(CONFIG_USBFS)
4982#if HOST_LONG_BITS > 64
4983#error USBDEVFS thunks do not support >64 bit hosts yet.
4984#endif
4985struct live_urb {
4986    uint64_t target_urb_adr;
4987    uint64_t target_buf_adr;
4988    char *target_buf_ptr;
4989    struct usbdevfs_urb host_urb;
4990};
4991
4992static GHashTable *usbdevfs_urb_hashtable(void)
4993{
4994    static GHashTable *urb_hashtable;
4995
4996    if (!urb_hashtable) {
4997        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4998    }
4999    return urb_hashtable;
5000}
5001
5002static void urb_hashtable_insert(struct live_urb *urb)
5003{
5004    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5005    g_hash_table_insert(urb_hashtable, urb, urb);
5006}
5007
5008static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5009{
5010    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5011    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5012}
5013
5014static void urb_hashtable_remove(struct live_urb *urb)
5015{
5016    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5017    g_hash_table_remove(urb_hashtable, urb);
5018}
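
/*
 * A note on the key type used above: the table is created with
 * g_int64_hash/g_int64_equal, which hash and compare the 64-bit value the
 * key pointer points at.  urb_hashtable_insert() passes the live_urb
 * pointer itself as key; since target_urb_adr is the first member of
 * struct live_urb, that pointer also points at the guest URB address, so
 * the two call sites agree:
 *
 *   lurb->target_urb_adr = arg;        // guest address of the URB
 *   urb_hashtable_insert(lurb);        // key points at target_urb_adr
 *   ...
 *   lurb = urb_hashtable_lookup(arg);  // hashes the same 64-bit value
 */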
5019
5020static abi_long
5021do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5022                          int fd, int cmd, abi_long arg)
5023{
5024    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5025    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5026    struct live_urb *lurb;
5027    void *argptr;
5028    uint64_t hurb;
5029    int target_size;
5030    uintptr_t target_urb_adr;
5031    abi_long ret;
5032
5033    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5034
5035    memset(buf_temp, 0, sizeof(uint64_t));
5036    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5037    if (is_error(ret)) {
5038        return ret;
5039    }
5040
5041    memcpy(&hurb, buf_temp, sizeof(uint64_t));
5042    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5043    if (!lurb->target_urb_adr) {
5044        return -TARGET_EFAULT;
5045    }
5046    urb_hashtable_remove(lurb);
5047    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5048        lurb->host_urb.buffer_length);
5049    lurb->target_buf_ptr = NULL;
5050
5051    /* restore the guest buffer pointer */
5052    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5053
5054    /* update the guest urb struct */
5055    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5056    if (!argptr) {
5057        g_free(lurb);
5058        return -TARGET_EFAULT;
5059    }
5060    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5061    unlock_user(argptr, lurb->target_urb_adr, target_size);
5062
5063    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5064    /* write back the urb handle */
5065    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5066    if (!argptr) {
5067        g_free(lurb);
5068        return -TARGET_EFAULT;
5069    }
5070
5071    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5072    target_urb_adr = lurb->target_urb_adr;
5073    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5074    unlock_user(argptr, arg, target_size);
5075
5076    g_free(lurb);
5077    return ret;
5078}
5079
5080static abi_long
5081do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5082                             uint8_t *buf_temp __attribute__((unused)),
5083                             int fd, int cmd, abi_long arg)
5084{
5085    struct live_urb *lurb;
5086
5087    /* map target address back to host URB with metadata. */
5088    lurb = urb_hashtable_lookup(arg);
5089    if (!lurb) {
5090        return -TARGET_EFAULT;
5091    }
5092    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5093}
5094
5095static abi_long
5096do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5097                            int fd, int cmd, abi_long arg)
5098{
5099    const argtype *arg_type = ie->arg_type;
5100    int target_size;
5101    abi_long ret;
5102    void *argptr;
5103    int rw_dir;
5104    struct live_urb *lurb;
5105
5106    /*
5107     * Each submitted URB needs to map to a unique ID for the
5108     * kernel, and that unique ID needs to be a pointer to
5109     * host memory.  Hence, we allocate a separate struct for each URB.
5110     * Isochronous transfers have a variable-length struct.
5111     */
5112    arg_type++;
5113    target_size = thunk_type_size(arg_type, THUNK_TARGET);
5114
5115    /* construct host copy of urb and metadata */
5116    lurb = g_try_new0(struct live_urb, 1);
5117    if (!lurb) {
5118        return -TARGET_ENOMEM;
5119    }
5120
5121    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5122    if (!argptr) {
5123        g_free(lurb);
5124        return -TARGET_EFAULT;
5125    }
5126    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5127    unlock_user(argptr, arg, 0);
5128
5129    lurb->target_urb_adr = arg;
5130    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5131
5132    /* buffer space used depends on endpoint type so lock the entire buffer */
5133    /* control type urbs should check the buffer contents for true direction */
5134    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5135    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5136        lurb->host_urb.buffer_length, 1);
5137    if (lurb->target_buf_ptr == NULL) {
5138        g_free(lurb);
5139        return -TARGET_EFAULT;
5140    }
5141
5142    /* update buffer pointer in host copy */
5143    lurb->host_urb.buffer = lurb->target_buf_ptr;
5144
5145    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5146    if (is_error(ret)) {
5147        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5148        g_free(lurb);
5149    } else {
5150        urb_hashtable_insert(lurb);
5151    }
5152
5153    return ret;
5154}
5155#endif /* CONFIG_USBFS */
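
/*
 * Rough lifecycle of a guest URB through the USBDEVFS helpers above (this
 * only restates the existing flow):
 *
 *   SUBMITURB: do_ioctl_usbdevfs_submiturb() allocates a struct live_urb,
 *     converts the guest urb into lurb->host_urb, locks the guest data
 *     buffer, submits &lurb->host_urb to the host and records the guest
 *     URB address in the hash table.
 *
 *   REAPURB: the host returns a pointer to the reaped usbdevfs_urb;
 *     do_ioctl_usbdevfs_reapurb() recovers the enclosing record with a
 *     container_of-style computation,
 *
 *       lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
 *
 *     unlocks the data buffer, converts the urb back to target layout and
 *     writes the original guest URB pointer into the reap argument.
 *
 *   DISCARDURB: do_ioctl_usbdevfs_discardurb() looks the guest address up
 *     in the hash table and cancels the matching host urb.
 */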
5156
5157static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5158                            int cmd, abi_long arg)
5159{
5160    void *argptr;
5161    struct dm_ioctl *host_dm;
5162    abi_long guest_data;
5163    uint32_t guest_data_size;
5164    int target_size;
5165    const argtype *arg_type = ie->arg_type;
5166    abi_long ret;
5167    void *big_buf = NULL;
5168    char *host_data;
5169
5170    arg_type++;
5171    target_size = thunk_type_size(arg_type, 0);
5172    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5173    if (!argptr) {
5174        ret = -TARGET_EFAULT;
5175        goto out;
5176    }
5177    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5178    unlock_user(argptr, arg, 0);
5179
5180    /* buf_temp is too small, so fetch things into a bigger buffer */
5181    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5182    memcpy(big_buf, buf_temp, target_size);
5183    buf_temp = big_buf;
5184    host_dm = big_buf;
5185
5186    guest_data = arg + host_dm->data_start;
5187    if ((guest_data - arg) < 0) {
5188        ret = -TARGET_EINVAL;
5189        goto out;
5190    }
5191    guest_data_size = host_dm->data_size - host_dm->data_start;
5192    host_data = (char*)host_dm + host_dm->data_start;
5193
5194    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5195    if (!argptr) {
5196        ret = -TARGET_EFAULT;
5197        goto out;
5198    }
5199
5200    switch (ie->host_cmd) {
5201    case DM_REMOVE_ALL:
5202    case DM_LIST_DEVICES:
5203    case DM_DEV_CREATE:
5204    case DM_DEV_REMOVE:
5205    case DM_DEV_SUSPEND:
5206    case DM_DEV_STATUS:
5207    case DM_DEV_WAIT:
5208    case DM_TABLE_STATUS:
5209    case DM_TABLE_CLEAR:
5210    case DM_TABLE_DEPS:
5211    case DM_LIST_VERSIONS:
5212        /* no input data */
5213        break;
5214    case DM_DEV_RENAME:
5215    case DM_DEV_SET_GEOMETRY:
5216        /* data contains only strings */
5217        memcpy(host_data, argptr, guest_data_size);
5218        break;
5219    case DM_TARGET_MSG:
5220        memcpy(host_data, argptr, guest_data_size);
5221        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5222        break;
5223    case DM_TABLE_LOAD:
5224    {
5225        void *gspec = argptr;
5226        void *cur_data = host_data;
5227        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5228        int spec_size = thunk_type_size(arg_type, 0);
5229        int i;
5230
5231        for (i = 0; i < host_dm->target_count; i++) {
5232            struct dm_target_spec *spec = cur_data;
5233            uint32_t next;
5234            int slen;
5235
5236            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5237            slen = strlen((char*)gspec + spec_size) + 1;
5238            next = spec->next;
5239            spec->next = sizeof(*spec) + slen;
5240            strcpy((char*)&spec[1], gspec + spec_size);
5241            gspec += next;
5242            cur_data += spec->next;
5243        }
5244        break;
5245    }
5246    default:
5247        ret = -TARGET_EINVAL;
5248        unlock_user(argptr, guest_data, 0);
5249        goto out;
5250    }
5251    unlock_user(argptr, guest_data, 0);
5252
5253    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5254    if (!is_error(ret)) {
5255        guest_data = arg + host_dm->data_start;
5256        guest_data_size = host_dm->data_size - host_dm->data_start;
5257        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5258        switch (ie->host_cmd) {
5259        case DM_REMOVE_ALL:
5260        case DM_DEV_CREATE:
5261        case DM_DEV_REMOVE:
5262        case DM_DEV_RENAME:
5263        case DM_DEV_SUSPEND:
5264        case DM_DEV_STATUS:
5265        case DM_TABLE_LOAD:
5266        case DM_TABLE_CLEAR:
5267        case DM_TARGET_MSG:
5268        case DM_DEV_SET_GEOMETRY:
5269            /* no return data */
5270            break;
5271        case DM_LIST_DEVICES:
5272        {
5273            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5274            uint32_t remaining_data = guest_data_size;
5275            void *cur_data = argptr;
5276            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5277            int nl_size = 12; /* can't use thunk_size due to alignment */
5278
5279            while (1) {
5280                uint32_t next = nl->next;
5281                if (next) {
5282                    nl->next = nl_size + (strlen(nl->name) + 1);
5283                }
5284                if (remaining_data < nl->next) {
5285                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
5286                    break;
5287                }
5288                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5289                strcpy(cur_data + nl_size, nl->name);
5290                cur_data += nl->next;
5291                remaining_data -= nl->next;
5292                if (!next) {
5293                    break;
5294                }
5295                nl = (void*)nl + next;
5296            }
5297            break;
5298        }
5299        case DM_DEV_WAIT:
5300        case DM_TABLE_STATUS:
5301        {
5302            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5303            void *cur_data = argptr;
5304            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5305            int spec_size = thunk_type_size(arg_type, 0);
5306            int i;
5307
5308            for (i = 0; i < host_dm->target_count; i++) {
5309                uint32_t next = spec->next;
5310                int slen = strlen((char*)&spec[1]) + 1;
5311                spec->next = (cur_data - argptr) + spec_size + slen;
5312                if (guest_data_size < spec->next) {
5313                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
5314                    break;
5315                }
5316                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5317                strcpy(cur_data + spec_size, (char*)&spec[1]);
5318                cur_data = argptr + spec->next;
5319                spec = (void*)host_dm + host_dm->data_start + next;
5320            }
5321            break;
5322        }
5323        case DM_TABLE_DEPS:
5324        {
5325            void *hdata = (void*)host_dm + host_dm->data_start;
5326            int count = *(uint32_t*)hdata;
5327            uint64_t *hdev = hdata + 8;
5328            uint64_t *gdev = argptr + 8;
5329            int i;
5330
5331            *(uint32_t*)argptr = tswap32(count);
5332            for (i = 0; i < count; i++) {
5333                *gdev = tswap64(*hdev);
5334                gdev++;
5335                hdev++;
5336            }
5337            break;
5338        }
5339        case DM_LIST_VERSIONS:
5340        {
5341            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5342            uint32_t remaining_data = guest_data_size;
5343            void *cur_data = argptr;
5344            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5345            int vers_size = thunk_type_size(arg_type, 0);
5346
5347            while (1) {
5348                uint32_t next = vers->next;
5349                if (next) {
5350                    vers->next = vers_size + (strlen(vers->name) + 1);
5351                }
5352                if (remaining_data < vers->next) {
5353                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
5354                    break;
5355                }
5356                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5357                strcpy(cur_data + vers_size, vers->name);
5358                cur_data += vers->next;
5359                remaining_data -= vers->next;
5360                if (!next) {
5361                    break;
5362                }
5363                vers = (void*)vers + next;
5364            }
5365            break;
5366        }
5367        default:
5368            unlock_user(argptr, guest_data, 0);
5369            ret = -TARGET_EINVAL;
5370            goto out;
5371        }
5372        unlock_user(argptr, guest_data, guest_data_size);
5373
5374        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5375        if (!argptr) {
5376            ret = -TARGET_EFAULT;
5377            goto out;
5378        }
5379        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5380        unlock_user(argptr, arg, target_size);
5381    }
5382out:
5383    g_free(big_buf);
5384    return ret;
5385}
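
/*
 * For orientation, the device-mapper argument handled above is a fixed
 * struct dm_ioctl header followed (data_start bytes from the start of the
 * header) by a command-specific variable-length payload, with data_size
 * covering both:
 *
 *   guest 'arg'                         host copy in big_buf
 *   +---------------------+             +---------------------+
 *   | struct dm_ioctl     |  thunk      | struct dm_ioctl     |
 *   |   data_start        | ----------> |   data_start        |
 *   |   data_size         |             |   data_size         |
 *   +---------------------+             +---------------------+
 *   | payload (targets,   |  per-cmd    | payload in host     |
 *   |  names, versions)   |  convert    | layout              |
 *   +---------------------+             +---------------------+
 *
 * guest_data = arg + data_start and host_data = (char *)host_dm +
 * data_start, so the per-command cases only have to translate the payload.
 */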
5386
5387static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5388                               int cmd, abi_long arg)
5389{
5390    void *argptr;
5391    int target_size;
5392    const argtype *arg_type = ie->arg_type;
5393    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5394    abi_long ret;
5395
5396    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5397    struct blkpg_partition host_part;
5398
5399    /* Read and convert blkpg */
5400    arg_type++;
5401    target_size = thunk_type_size(arg_type, 0);
5402    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5403    if (!argptr) {
5404        ret = -TARGET_EFAULT;
5405        goto out;
5406    }
5407    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5408    unlock_user(argptr, arg, 0);
5409
5410    switch (host_blkpg->op) {
5411    case BLKPG_ADD_PARTITION:
5412    case BLKPG_DEL_PARTITION:
5413        /* payload is struct blkpg_partition */
5414        break;
5415    default:
5416        /* Unknown opcode */
5417        ret = -TARGET_EINVAL;
5418        goto out;
5419    }
5420
5421    /* Read and convert blkpg->data */
5422    arg = (abi_long)(uintptr_t)host_blkpg->data;
5423    target_size = thunk_type_size(part_arg_type, 0);
5424    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5425    if (!argptr) {
5426        ret = -TARGET_EFAULT;
5427        goto out;
5428    }
5429    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5430    unlock_user(argptr, arg, 0);
5431
5432    /* Swizzle the data pointer to our local copy and call! */
5433    host_blkpg->data = &host_part;
5434    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5435
5436out:
5437    return ret;
5438}
5439
5440static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5441                                int fd, int cmd, abi_long arg)
5442{
5443    const argtype *arg_type = ie->arg_type;
5444    const StructEntry *se;
5445    const argtype *field_types;
5446    const int *dst_offsets, *src_offsets;
5447    int target_size;
5448    void *argptr;
5449    abi_ulong *target_rt_dev_ptr = NULL;
5450    unsigned long *host_rt_dev_ptr = NULL;
5451    abi_long ret;
5452    int i;
5453
5454    assert(ie->access == IOC_W);
5455    assert(*arg_type == TYPE_PTR);
5456    arg_type++;
5457    assert(*arg_type == TYPE_STRUCT);
5458    target_size = thunk_type_size(arg_type, 0);
5459    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5460    if (!argptr) {
5461        return -TARGET_EFAULT;
5462    }
5463    arg_type++;
5464    assert(*arg_type == (int)STRUCT_rtentry);
5465    se = struct_entries + *arg_type++;
5466    assert(se->convert[0] == NULL);
5467    /* convert struct here to be able to catch rt_dev string */
5468    field_types = se->field_types;
5469    dst_offsets = se->field_offsets[THUNK_HOST];
5470    src_offsets = se->field_offsets[THUNK_TARGET];
5471    for (i = 0; i < se->nb_fields; i++) {
5472        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5473            assert(*field_types == TYPE_PTRVOID);
5474            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5475            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5476            if (*target_rt_dev_ptr != 0) {
5477                *host_rt_dev_ptr = (unsigned long)lock_user_string(
5478                                                  tswapal(*target_rt_dev_ptr));
5479                if (!*host_rt_dev_ptr) {
5480                    unlock_user(argptr, arg, 0);
5481                    return -TARGET_EFAULT;
5482                }
5483            } else {
5484                *host_rt_dev_ptr = 0;
5485            }
5486            field_types++;
5487            continue;
5488        }
5489        field_types = thunk_convert(buf_temp + dst_offsets[i],
5490                                    argptr + src_offsets[i],
5491                                    field_types, THUNK_HOST);
5492    }
5493    unlock_user(argptr, arg, 0);
5494
5495    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5496
5497    assert(host_rt_dev_ptr != NULL);
5498    assert(target_rt_dev_ptr != NULL);
5499    if (*host_rt_dev_ptr != 0) {
5500        unlock_user((void *)*host_rt_dev_ptr,
5501                    *target_rt_dev_ptr, 0);
5502    }
5503    return ret;
5504}
5505
5506static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5507                                     int fd, int cmd, abi_long arg)
5508{
5509    int sig = target_to_host_signal(arg);
5510    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5511}
5512
5513static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5514                                    int fd, int cmd, abi_long arg)
5515{
5516    struct timeval tv;
5517    abi_long ret;
5518
5519    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5520    if (is_error(ret)) {
5521        return ret;
5522    }
5523
5524    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5525        if (copy_to_user_timeval(arg, &tv)) {
5526            return -TARGET_EFAULT;
5527        }
5528    } else {
5529        if (copy_to_user_timeval64(arg, &tv)) {
5530            return -TARGET_EFAULT;
5531        }
5532    }
5533
5534    return ret;
5535}
5536
5537static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5538                                      int fd, int cmd, abi_long arg)
5539{
5540    struct timespec ts;
5541    abi_long ret;
5542
5543    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5544    if (is_error(ret)) {
5545        return ret;
5546    }
5547
5548    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5549        if (host_to_target_timespec(arg, &ts)) {
5550            return -TARGET_EFAULT;
5551        }
5552    } else {
5553        if (host_to_target_timespec64(arg, &ts)) {
5554            return -TARGET_EFAULT;
5555        }
5556    }
5557
5558    return ret;
5559}
5560
5561#ifdef TIOCGPTPEER
5562static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5563                                     int fd, int cmd, abi_long arg)
5564{
5565    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5566    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5567}
5568#endif
5569
5570#ifdef HAVE_DRM_H
5571
5572static void unlock_drm_version(struct drm_version *host_ver,
5573                               struct target_drm_version *target_ver,
5574                               bool copy)
5575{
5576    unlock_user(host_ver->name, target_ver->name,
5577                                copy ? host_ver->name_len : 0);
5578    unlock_user(host_ver->date, target_ver->date,
5579                                copy ? host_ver->date_len : 0);
5580    unlock_user(host_ver->desc, target_ver->desc,
5581                                copy ? host_ver->desc_len : 0);
5582}
5583
5584static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5585                                          struct target_drm_version *target_ver)
5586{
5587    memset(host_ver, 0, sizeof(*host_ver));
5588
5589    __get_user(host_ver->name_len, &target_ver->name_len);
5590    if (host_ver->name_len) {
5591        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5592                                   target_ver->name_len, 0);
5593        if (!host_ver->name) {
5594            return -EFAULT;
5595        }
5596    }
5597
5598    __get_user(host_ver->date_len, &target_ver->date_len);
5599    if (host_ver->date_len) {
5600        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5601                                   target_ver->date_len, 0);
5602        if (!host_ver->date) {
5603            goto err;
5604        }
5605    }
5606
5607    __get_user(host_ver->desc_len, &target_ver->desc_len);
5608    if (host_ver->desc_len) {
5609        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5610                                   target_ver->desc_len, 0);
5611        if (!host_ver->desc) {
5612            goto err;
5613        }
5614    }
5615
5616    return 0;
5617err:
5618    unlock_drm_version(host_ver, target_ver, false);
5619    return -EFAULT;
5620}
5621
5622static inline void host_to_target_drmversion(
5623                                          struct target_drm_version *target_ver,
5624                                          struct drm_version *host_ver)
5625{
5626    __put_user(host_ver->version_major, &target_ver->version_major);
5627    __put_user(host_ver->version_minor, &target_ver->version_minor);
5628    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5629    __put_user(host_ver->name_len, &target_ver->name_len);
5630    __put_user(host_ver->date_len, &target_ver->date_len);
5631    __put_user(host_ver->desc_len, &target_ver->desc_len);
5632    unlock_drm_version(host_ver, target_ver, true);
5633}
5634
5635static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5636                             int fd, int cmd, abi_long arg)
5637{
5638    struct drm_version *ver;
5639    struct target_drm_version *target_ver;
5640    abi_long ret;
5641
5642    switch (ie->host_cmd) {
5643    case DRM_IOCTL_VERSION:
5644        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5645            return -TARGET_EFAULT;
5646        }
5647        ver = (struct drm_version *)buf_temp;
5648        ret = target_to_host_drmversion(ver, target_ver);
5649        if (!is_error(ret)) {
5650            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5651            if (is_error(ret)) {
5652                unlock_drm_version(ver, target_ver, false);
5653            } else {
5654                host_to_target_drmversion(target_ver, ver);
5655            }
5656        }
5657        unlock_user_struct(target_ver, arg, 0);
5658        return ret;
5659    }
5660    return -TARGET_ENOSYS;
5661}
5662
5663static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5664                                           struct drm_i915_getparam *gparam,
5665                                           int fd, abi_long arg)
5666{
5667    abi_long ret;
5668    int value;
5669    struct target_drm_i915_getparam *target_gparam;
5670
5671    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5672        return -TARGET_EFAULT;
5673    }
5674
5675    __get_user(gparam->param, &target_gparam->param);
5676    gparam->value = &value;
5677    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5678    put_user_s32(value, target_gparam->value);
5679
5680    unlock_user_struct(target_gparam, arg, 0);
5681    return ret;
5682}
5683
5684static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5685                                  int fd, int cmd, abi_long arg)
5686{
5687    switch (ie->host_cmd) {
5688    case DRM_IOCTL_I915_GETPARAM:
5689        return do_ioctl_drm_i915_getparam(ie,
5690                                          (struct drm_i915_getparam *)buf_temp,
5691                                          fd, arg);
5692    default:
5693        return -TARGET_ENOSYS;
5694    }
5695}
5696
5697#endif
5698
5699static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5700                                        int fd, int cmd, abi_long arg)
5701{
5702    struct tun_filter *filter = (struct tun_filter *)buf_temp;
5703    struct tun_filter *target_filter;
5704    char *target_addr;
5705
5706    assert(ie->access == IOC_W);
5707
5708    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5709    if (!target_filter) {
5710        return -TARGET_EFAULT;
5711    }
5712    filter->flags = tswap16(target_filter->flags);
5713    filter->count = tswap16(target_filter->count);
5714    unlock_user(target_filter, arg, 0);
5715
5716    if (filter->count) {
5717        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5718            MAX_STRUCT_SIZE) {
5719            return -TARGET_EFAULT;
5720        }
5721
5722        target_addr = lock_user(VERIFY_READ,
5723                                arg + offsetof(struct tun_filter, addr),
5724                                filter->count * ETH_ALEN, 1);
5725        if (!target_addr) {
5726            return -TARGET_EFAULT;
5727        }
5728        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5729        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5730    }
5731
5732    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5733}
5734
5735IOCTLEntry ioctl_entries[] = {
5736#define IOCTL(cmd, access, ...) \
5737    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5738#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5739    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5740#define IOCTL_IGNORE(cmd) \
5741    { TARGET_ ## cmd, 0, #cmd },
5742#include "ioctls.h"
5743    { 0, 0, },
5744};
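
/*
 * Illustrative expansion of the table above.  A line in ioctls.h of the
 * form (hypothetical example)
 *
 *   IOCTL(BLKGETSIZE64, IOC_R, MK_PTR(TYPE_ULONGLONG))
 *
 * becomes the entry
 *
 *   { TARGET_BLKGETSIZE64, BLKGETSIZE64, "BLKGETSIZE64", IOC_R, 0,
 *     { MK_PTR(TYPE_ULONGLONG) } },
 *
 * IOCTL_SPECIAL() additionally fills the do_ioctl callback slot with a
 * helper like the ones defined earlier in this file, and IOCTL_IGNORE()
 * leaves host_cmd zero so that do_ioctl() below rejects the request with
 * -TARGET_ENOSYS instead of forwarding it to the host.
 */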
5745
5746/* ??? Implement proper locking for ioctls.  */
5747/* do_ioctl() must return target values and target errnos. */
5748static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5749{
5750    const IOCTLEntry *ie;
5751    const argtype *arg_type;
5752    abi_long ret;
5753    uint8_t buf_temp[MAX_STRUCT_SIZE];
5754    int target_size;
5755    void *argptr;
5756
5757    ie = ioctl_entries;
5758    for(;;) {
5759        if (ie->target_cmd == 0) {
5760            qemu_log_mask(
5761                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5762            return -TARGET_ENOSYS;
5763        }
5764        if (ie->target_cmd == cmd)
5765            break;
5766        ie++;
5767    }
5768    arg_type = ie->arg_type;
5769    if (ie->do_ioctl) {
5770        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5771    } else if (!ie->host_cmd) {
5772        /* Some architectures define BSD ioctls in their headers
5773           that are not implemented in Linux.  */
5774        return -TARGET_ENOSYS;
5775    }
5776
5777    switch(arg_type[0]) {
5778    case TYPE_NULL:
5779        /* no argument */
5780        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5781        break;
5782    case TYPE_PTRVOID:
5783    case TYPE_INT:
5784    case TYPE_LONG:
5785    case TYPE_ULONG:
5786        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5787        break;
5788    case TYPE_PTR:
5789        arg_type++;
5790        target_size = thunk_type_size(arg_type, 0);
5791        switch(ie->access) {
5792        case IOC_R:
5793            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5794            if (!is_error(ret)) {
5795                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5796                if (!argptr)
5797                    return -TARGET_EFAULT;
5798                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5799                unlock_user(argptr, arg, target_size);
5800            }
5801            break;
5802        case IOC_W:
5803            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5804            if (!argptr)
5805                return -TARGET_EFAULT;
5806            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5807            unlock_user(argptr, arg, 0);
5808            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5809            break;
5810        default:
5811        case IOC_RW:
5812            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5813            if (!argptr)
5814                return -TARGET_EFAULT;
5815            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5816            unlock_user(argptr, arg, 0);
5817            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5818            if (!is_error(ret)) {
5819                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5820                if (!argptr)
5821                    return -TARGET_EFAULT;
5822                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5823                unlock_user(argptr, arg, target_size);
5824            }
5825            break;
5826        }
5827        break;
5828    default:
5829        qemu_log_mask(LOG_UNIMP,
5830                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5831                      (long)cmd, arg_type[0]);
5832        ret = -TARGET_ENOSYS;
5833        break;
5834    }
5835    return ret;
5836}
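
/*
 * Summary of the generic conversion performed above for a TYPE_PTR ioctl
 * that has no dedicated do_ioctl helper:
 *
 *   IOC_R:  run the host ioctl on buf_temp first, then on success
 *           thunk_convert(..., THUNK_TARGET) the result out to 'arg'
 *           (data flows kernel -> guest).
 *   IOC_W:  thunk_convert(..., THUNK_HOST) the guest struct into buf_temp,
 *           then run the host ioctl (data flows guest -> kernel).
 *   IOC_RW: convert in before the call and back out after a successful
 *           return.
 *
 * Scalar argument types (TYPE_INT, TYPE_LONG, ...) are passed through
 * unchanged, and buf_temp is a fixed MAX_STRUCT_SIZE scratch buffer, which
 * is why the variable-length ioctls earlier in this file need their own
 * helpers.
 */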
5837
5838static const bitmask_transtbl iflag_tbl[] = {
5839        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5840        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5841        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5842        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5843        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5844        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5845        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5846        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5847        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5848        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5849        { TARGET_IXON, TARGET_IXON, IXON, IXON },
5850        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5851        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5852        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5853        { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5854        { 0, 0, 0, 0 }
5855};
5856
5857static const bitmask_transtbl oflag_tbl[] = {
5858        { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5859        { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5860        { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5861        { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5862        { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5863        { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5864        { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5865        { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5866        { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5867        { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5868        { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5869        { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5870        { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5871        { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5872        { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5873        { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5874        { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5875        { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5876        { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5877        { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5878        { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5879        { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5880        { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5881        { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5882        { 0, 0, 0, 0 }
5883};
5884
5885static const bitmask_transtbl cflag_tbl[] = {
5886        { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5887        { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5888        { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5889        { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5890        { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5891        { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5892        { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5893        { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5894        { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5895        { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5896        { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5897        { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5898        { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5899        { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5900        { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5901        { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5902        { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5903        { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5904        { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5905        { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5906        { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5907        { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5908        { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5909        { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5910        { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5911        { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5912        { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5913        { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5914        { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5915        { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5916        { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5917        { 0, 0, 0, 0 }
5918};
5919
5920static const bitmask_transtbl lflag_tbl[] = {
5921  { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5922  { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5923  { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5924  { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5925  { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5926  { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5927  { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5928  { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5929  { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5930  { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5931  { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5932  { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5933  { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5934  { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5935  { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5936  { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5937  { 0, 0, 0, 0 }
5938};
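
/*
 * Each row in the bitmask_transtbl tables above is
 * { target_mask, target_bits, host_mask, host_bits }: roughly speaking,
 * target_to_host_bitmask() ORs host_bits into the result whenever the
 * masked target value equals target_bits, and host_to_target_bitmask()
 * does the reverse.  Single flags therefore repeat the same value for mask
 * and bits, e.g.
 *
 *   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
 *
 * while multi-bit fields enumerate one row per possible value, as the
 * CBAUD and CSIZE rows in cflag_tbl do.
 */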
5939
5940static void target_to_host_termios (void *dst, const void *src)
5941{
5942    struct host_termios *host = dst;
5943    const struct target_termios *target = src;
5944
5945    host->c_iflag =
5946        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5947    host->c_oflag =
5948        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5949    host->c_cflag =
5950        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5951    host->c_lflag =
5952        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5953    host->c_line = target->c_line;
5954
5955    memset(host->c_cc, 0, sizeof(host->c_cc));
5956    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5957    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5958    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5959    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5960    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5961    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5962    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5963    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5964    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5965    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5966    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5967    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5968    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5969    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5970    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5971    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5972    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5973}
5974
5975static void host_to_target_termios (void *dst, const void *src)
5976{
5977    struct target_termios *target = dst;
5978    const struct host_termios *host = src;
5979
5980    target->c_iflag =
5981        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5982    target->c_oflag =
5983        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5984    target->c_cflag =
5985        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5986    target->c_lflag =
5987        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5988    target->c_line = host->c_line;
5989
5990    memset(target->c_cc, 0, sizeof(target->c_cc));
5991    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5992    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5993    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5994    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5995    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5996    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5997    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5998    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5999    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6000    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6001    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6002    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6003    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6004    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6005    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6006    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6007    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6008}
6009
6010static const StructEntry struct_termios_def = {
6011    .convert = { host_to_target_termios, target_to_host_termios },
6012    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6013    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6014    .print = print_termios,
6015};
6016
6017static const bitmask_transtbl mmap_flags_tbl[] = {
6018    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6019    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6020    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6021    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6022      MAP_ANONYMOUS, MAP_ANONYMOUS },
6023    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6024      MAP_GROWSDOWN, MAP_GROWSDOWN },
6025    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6026      MAP_DENYWRITE, MAP_DENYWRITE },
6027    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6028      MAP_EXECUTABLE, MAP_EXECUTABLE },
6029    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6030    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6031      MAP_NORESERVE, MAP_NORESERVE },
6032    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6033    /* MAP_STACK had been ignored by the kernel for quite some time.
6034       Recognize it for the target insofar as we do not want to pass
6035       it through to the host.  */
6036    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6037    { 0, 0, 0, 0 }
6038};
6039
6040/*
6041 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6042 *       TARGET_I386 is defined if TARGET_X86_64 is defined
6043 */
6044#if defined(TARGET_I386)
6045
6046/* NOTE: there is really one LDT for all the threads */
6047static uint8_t *ldt_table;
6048
6049static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6050{
6051    int size;
6052    void *p;
6053
6054    if (!ldt_table)
6055        return 0;
6056    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6057    if (size > bytecount)
6058        size = bytecount;
6059    p = lock_user(VERIFY_WRITE, ptr, size, 0);
6060    if (!p)
6061        return -TARGET_EFAULT;
6062    /* ??? Should this be byteswapped?  */
6063    memcpy(p, ldt_table, size);
6064    unlock_user(p, ptr, size);
6065    return size;
6066}
6067
6068/* XXX: add locking support */
6069static abi_long write_ldt(CPUX86State *env,
6070                          abi_ulong ptr, unsigned long bytecount, int oldmode)
6071{
6072    struct target_modify_ldt_ldt_s ldt_info;
6073    struct target_modify_ldt_ldt_s *target_ldt_info;
6074    int seg_32bit, contents, read_exec_only, limit_in_pages;
6075    int seg_not_present, useable, lm;
6076    uint32_t *lp, entry_1, entry_2;
6077
6078    if (bytecount != sizeof(ldt_info))
6079        return -TARGET_EINVAL;
6080    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6081        return -TARGET_EFAULT;
6082    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6083    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6084    ldt_info.limit = tswap32(target_ldt_info->limit);
6085    ldt_info.flags = tswap32(target_ldt_info->flags);
6086    unlock_user_struct(target_ldt_info, ptr, 0);
6087
6088    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6089        return -TARGET_EINVAL;
6090    seg_32bit = ldt_info.flags & 1;
6091    contents = (ldt_info.flags >> 1) & 3;
6092    read_exec_only = (ldt_info.flags >> 3) & 1;
6093    limit_in_pages = (ldt_info.flags >> 4) & 1;
6094    seg_not_present = (ldt_info.flags >> 5) & 1;
6095    useable = (ldt_info.flags >> 6) & 1;
6096#ifdef TARGET_ABI32
6097    lm = 0;
6098#else
6099    lm = (ldt_info.flags >> 7) & 1;
6100#endif
6101    if (contents == 3) {
6102        if (oldmode)
6103            return -TARGET_EINVAL;
6104        if (seg_not_present == 0)
6105            return -TARGET_EINVAL;
6106    }
6107    /* allocate the LDT */
6108    if (!ldt_table) {
6109        env->ldt.base = target_mmap(0,
6110                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6111                                    PROT_READ|PROT_WRITE,
6112                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6113        if (env->ldt.base == -1)
6114            return -TARGET_ENOMEM;
6115        memset(g2h_untagged(env->ldt.base), 0,
6116               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6117        env->ldt.limit = 0xffff;
6118        ldt_table = g2h_untagged(env->ldt.base);
6119    }
6120
6121    /* NOTE: same code as Linux kernel */
6122    /* Allow LDTs to be cleared by the user. */
6123    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6124        if (oldmode ||
6125            (contents == 0              &&
6126             read_exec_only == 1        &&
6127             seg_32bit == 0             &&
6128             limit_in_pages == 0        &&
6129             seg_not_present == 1       &&
6130             useable == 0 )) {
6131            entry_1 = 0;
6132            entry_2 = 0;
6133            goto install;
6134        }
6135    }
6136
6137    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6138        (ldt_info.limit & 0x0ffff);
6139    entry_2 = (ldt_info.base_addr & 0xff000000) |
6140        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6141        (ldt_info.limit & 0xf0000) |
6142        ((read_exec_only ^ 1) << 9) |
6143        (contents << 10) |
6144        ((seg_not_present ^ 1) << 15) |
6145        (seg_32bit << 22) |
6146        (limit_in_pages << 23) |
6147        (lm << 21) |
6148        0x7000;
6149    if (!oldmode)
6150        entry_2 |= (useable << 20);
6151
6152    /* Install the new entry ...  */
6153install:
6154    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6155    lp[0] = tswap32(entry_1);
6156    lp[1] = tswap32(entry_2);
6157    return 0;
6158}
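
/*
 * The entry_1/entry_2 words built above follow the usual x86 segment
 * descriptor layout (the same packing the kernel performs for modify_ldt).
 * As a rough map of where each input lands:
 *
 *   entry_1: [31:16] base_addr[15:0]     [15:0]  limit[15:0]
 *   entry_2: [31:24] base_addr[31:24]    [23]    limit_in_pages
 *            [22]    seg_32bit           [21]    lm
 *            [20]    useable             [19:16] limit[19:16]
 *            [15]    !seg_not_present    [14:12] DPL=3, S=1 (the 0x7000)
 *            [11:10] contents            [9]     !read_exec_only
 *            [7:0]   base_addr[23:16]
 *
 * For example, base_addr 0x12345678 with limit 0xabcde packs the low half
 * as entry_1 = 0x5678bcde, with the remaining base bytes 0x12 and 0x34 in
 * bits [31:24] and [7:0] of entry_2.
 */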
6159
6160/* specific and weird i386 syscalls */
6161static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6162                              unsigned long bytecount)
6163{
6164    abi_long ret;
6165
6166    switch (func) {
6167    case 0:
6168        ret = read_ldt(ptr, bytecount);
6169        break;
6170    case 1:
6171        ret = write_ldt(env, ptr, bytecount, 1);
6172        break;
6173    case 0x11:
6174        ret = write_ldt(env, ptr, bytecount, 0);
6175        break;
6176    default:
6177        ret = -TARGET_ENOSYS;
6178        break;
6179    }
6180    return ret;
6181}
6182
6183#if defined(TARGET_ABI32)
6184abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6185{
6186    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6187    struct target_modify_ldt_ldt_s ldt_info;
6188    struct target_modify_ldt_ldt_s *target_ldt_info;
6189    int seg_32bit, contents, read_exec_only, limit_in_pages;
6190    int seg_not_present, useable, lm;
6191    uint32_t *lp, entry_1, entry_2;
6192    int i;
6193
6194    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6195    if (!target_ldt_info)
6196        return -TARGET_EFAULT;
6197    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6198    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6199    ldt_info.limit = tswap32(target_ldt_info->limit);
6200    ldt_info.flags = tswap32(target_ldt_info->flags);
6201    if (ldt_info.entry_number == -1) {
6202        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6203            if (gdt_table[i] == 0) {
6204                ldt_info.entry_number = i;
6205                target_ldt_info->entry_number = tswap32(i);
6206                break;
6207            }
6208        }
6209    }
6210    unlock_user_struct(target_ldt_info, ptr, 1);
6211
6212    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6213        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6214        return -TARGET_EINVAL;
6215    seg_32bit = ldt_info.flags & 1;
6216    contents = (ldt_info.flags >> 1) & 3;
6217    read_exec_only = (ldt_info.flags >> 3) & 1;
6218    limit_in_pages = (ldt_info.flags >> 4) & 1;
6219    seg_not_present = (ldt_info.flags >> 5) & 1;
6220    useable = (ldt_info.flags >> 6) & 1;
6221#ifdef TARGET_ABI32
6222    lm = 0;
6223#else
6224    lm = (ldt_info.flags >> 7) & 1;
6225#endif
6226
6227    if (contents == 3) {
6228        if (seg_not_present == 0)
6229            return -TARGET_EINVAL;
6230    }
6231
6232    /* NOTE: same code as Linux kernel */
6233    /* Allow LDTs to be cleared by the user. */
6234    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6235        if ((contents == 0             &&
6236             read_exec_only == 1       &&
6237             seg_32bit == 0            &&
6238             limit_in_pages == 0       &&
6239             seg_not_present == 1      &&
6240             useable == 0 )) {
6241            entry_1 = 0;
6242            entry_2 = 0;
6243            goto install;
6244        }
6245    }
6246
6247    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6248        (ldt_info.limit & 0x0ffff);
6249    entry_2 = (ldt_info.base_addr & 0xff000000) |
6250        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6251        (ldt_info.limit & 0xf0000) |
6252        ((read_exec_only ^ 1) << 9) |
6253        (contents << 10) |
6254        ((seg_not_present ^ 1) << 15) |
6255        (seg_32bit << 22) |
6256        (limit_in_pages << 23) |
6257        (useable << 20) |
6258        (lm << 21) |
6259        0x7000;
6260
6261    /* Install the new entry ...  */
6262install:
6263    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6264    lp[0] = tswap32(entry_1);
6265    lp[1] = tswap32(entry_2);
6266    return 0;
6267}
6268
6269static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6270{
6271    struct target_modify_ldt_ldt_s *target_ldt_info;
6272    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6273    uint32_t base_addr, limit, flags;
6274    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6275    int seg_not_present, useable, lm;
6276    uint32_t *lp, entry_1, entry_2;
6277
6278    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6279    if (!target_ldt_info)
6280        return -TARGET_EFAULT;
6281    idx = tswap32(target_ldt_info->entry_number);
6282    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6283        idx > TARGET_GDT_ENTRY_TLS_MAX) {
6284        unlock_user_struct(target_ldt_info, ptr, 1);
6285        return -TARGET_EINVAL;
6286    }
6287    lp = (uint32_t *)(gdt_table + idx);
6288    entry_1 = tswap32(lp[0]);
6289    entry_2 = tswap32(lp[1]);
6290    
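    /* Unpack the descriptor words back into set_thread_area-style flag bits. */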
6291    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6292    contents = (entry_2 >> 10) & 3;
6293    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6294    seg_32bit = (entry_2 >> 22) & 1;
6295    limit_in_pages = (entry_2 >> 23) & 1;
6296    useable = (entry_2 >> 20) & 1;
6297#ifdef TARGET_ABI32
6298    lm = 0;
6299#else
6300    lm = (entry_2 >> 21) & 1;
6301#endif
6302    flags = (seg_32bit << 0) | (contents << 1) |
6303        (read_exec_only << 3) | (limit_in_pages << 4) |
6304        (seg_not_present << 5) | (useable << 6) | (lm << 7);
6305    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6306    base_addr = (entry_1 >> 16) | 
6307        (entry_2 & 0xff000000) | 
6308        ((entry_2 & 0xff) << 16);
6309    target_ldt_info->base_addr = tswapal(base_addr);
6310    target_ldt_info->limit = tswap32(limit);
6311    target_ldt_info->flags = tswap32(flags);
6312    unlock_user_struct(target_ldt_info, ptr, 1);
6313    return 0;
6314}
6315
6316abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6317{
6318    return -TARGET_ENOSYS;
6319}
6320#else
6321abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6322{
6323    abi_long ret = 0;
6324    abi_ulong val;
6325    int idx;
6326
6327    switch(code) {
6328    case TARGET_ARCH_SET_GS:
6329    case TARGET_ARCH_SET_FS:
6330        if (code == TARGET_ARCH_SET_GS)
6331            idx = R_GS;
6332        else
6333            idx = R_FS;
6334        cpu_x86_load_seg(env, idx, 0);
6335        env->segs[idx].base = addr;
6336        break;
6337    case TARGET_ARCH_GET_GS:
6338    case TARGET_ARCH_GET_FS:
6339        if (code == TARGET_ARCH_GET_GS)
6340            idx = R_GS;
6341        else
6342            idx = R_FS;
6343        val = env->segs[idx].base;
6344        if (put_user(val, addr, abi_ulong))
6345            ret = -TARGET_EFAULT;
6346        break;
6347    default:
6348        ret = -TARGET_EINVAL;
6349        break;
6350    }
6351    return ret;
6352}
6353#endif /* defined(TARGET_ABI32) */
6354#endif /* defined(TARGET_I386) */
6355
6356/*
6357 * These constants are generic.  Supply any that are missing from the host.
6358 */
6359#ifndef PR_SET_NAME
6360# define PR_SET_NAME    15
6361# define PR_GET_NAME    16
6362#endif
6363#ifndef PR_SET_FP_MODE
6364# define PR_SET_FP_MODE 45
6365# define PR_GET_FP_MODE 46
6366# define PR_FP_MODE_FR   (1 << 0)
6367# define PR_FP_MODE_FRE  (1 << 1)
6368#endif
6369#ifndef PR_SVE_SET_VL
6370# define PR_SVE_SET_VL  50
6371# define PR_SVE_GET_VL  51
6372# define PR_SVE_VL_LEN_MASK  0xffff
6373# define PR_SVE_VL_INHERIT   (1 << 17)
6374#endif
6375#ifndef PR_PAC_RESET_KEYS
6376# define PR_PAC_RESET_KEYS  54
6377# define PR_PAC_APIAKEY   (1 << 0)
6378# define PR_PAC_APIBKEY   (1 << 1)
6379# define PR_PAC_APDAKEY   (1 << 2)
6380# define PR_PAC_APDBKEY   (1 << 3)
6381# define PR_PAC_APGAKEY   (1 << 4)
6382#endif
6383#ifndef PR_SET_TAGGED_ADDR_CTRL
6384# define PR_SET_TAGGED_ADDR_CTRL 55
6385# define PR_GET_TAGGED_ADDR_CTRL 56
6386# define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6387#endif
6388#ifndef PR_MTE_TCF_SHIFT
6389# define PR_MTE_TCF_SHIFT       1
6390# define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6391# define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6392# define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6393# define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6394# define PR_MTE_TAG_SHIFT       3
6395# define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6396#endif
6397#ifndef PR_SET_IO_FLUSHER
6398# define PR_SET_IO_FLUSHER 57
6399# define PR_GET_IO_FLUSHER 58
6400#endif
6401#ifndef PR_SET_SYSCALL_USER_DISPATCH
6402# define PR_SET_SYSCALL_USER_DISPATCH 59
6403#endif
6404#ifndef PR_SME_SET_VL
6405# define PR_SME_SET_VL  63
6406# define PR_SME_GET_VL  64
6407# define PR_SME_VL_LEN_MASK  0xffff
6408# define PR_SME_VL_INHERIT   (1 << 17)
6409#endif
6410
6411#include "target_prctl.h"
6412
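/*
 * Fallbacks for targets that do not implement a given arch-specific
 * prctl hook in target_prctl.h: fail with EINVAL, as the kernel does
 * for options an architecture does not support.
 */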
6413static abi_long do_prctl_inval0(CPUArchState *env)
6414{
6415    return -TARGET_EINVAL;
6416}
6417
6418static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6419{
6420    return -TARGET_EINVAL;
6421}
6422
6423#ifndef do_prctl_get_fp_mode
6424#define do_prctl_get_fp_mode do_prctl_inval0
6425#endif
6426#ifndef do_prctl_set_fp_mode
6427#define do_prctl_set_fp_mode do_prctl_inval1
6428#endif
6429#ifndef do_prctl_sve_get_vl
6430#define do_prctl_sve_get_vl do_prctl_inval0
6431#endif
6432#ifndef do_prctl_sve_set_vl
6433#define do_prctl_sve_set_vl do_prctl_inval1
6434#endif
6435#ifndef do_prctl_reset_keys
6436#define do_prctl_reset_keys do_prctl_inval1
6437#endif
6438#ifndef do_prctl_set_tagged_addr_ctrl
6439#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6440#endif
6441#ifndef do_prctl_get_tagged_addr_ctrl
6442#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6443#endif
6444#ifndef do_prctl_get_unalign
6445#define do_prctl_get_unalign do_prctl_inval1
6446#endif
6447#ifndef do_prctl_set_unalign
6448#define do_prctl_set_unalign do_prctl_inval1
6449#endif
6450#ifndef do_prctl_sme_get_vl
6451#define do_prctl_sme_get_vl do_prctl_inval0
6452#endif
6453#ifndef do_prctl_sme_set_vl
6454#define do_prctl_sme_set_vl do_prctl_inval1
6455#endif
6456
6457static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6458                         abi_long arg3, abi_long arg4, abi_long arg5)
6459{
6460    abi_long ret;
6461
6462    switch (option) {
6463    case PR_GET_PDEATHSIG:
6464        {
6465            int deathsig;
6466            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6467                                  arg3, arg4, arg5));
6468            if (!is_error(ret) &&
6469                put_user_s32(host_to_target_signal(deathsig), arg2)) {
6470                return -TARGET_EFAULT;
6471            }
6472            return ret;
6473        }
6474    case PR_SET_PDEATHSIG:
6475        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6476                               arg3, arg4, arg5));
6477    case PR_GET_NAME:
6478        {
6479            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6480            if (!name) {
6481                return -TARGET_EFAULT;
6482            }
6483            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6484                                  arg3, arg4, arg5));
6485            unlock_user(name, arg2, 16);
6486            return ret;
6487        }
6488    case PR_SET_NAME:
6489        {
6490            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6491            if (!name) {
6492                return -TARGET_EFAULT;
6493            }
6494            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6495                                  arg3, arg4, arg5));
6496            unlock_user(name, arg2, 0);
6497            return ret;
6498        }
6499    case PR_GET_FP_MODE:
6500        return do_prctl_get_fp_mode(env);
6501    case PR_SET_FP_MODE:
6502        return do_prctl_set_fp_mode(env, arg2);
6503    case PR_SVE_GET_VL:
6504        return do_prctl_sve_get_vl(env);
6505    case PR_SVE_SET_VL:
6506        return do_prctl_sve_set_vl(env, arg2);
6507    case PR_SME_GET_VL:
6508        return do_prctl_sme_get_vl(env);
6509    case PR_SME_SET_VL:
6510        return do_prctl_sme_set_vl(env, arg2);
6511    case PR_PAC_RESET_KEYS:
6512        if (arg3 || arg4 || arg5) {
6513            return -TARGET_EINVAL;
6514        }
6515        return do_prctl_reset_keys(env, arg2);
6516    case PR_SET_TAGGED_ADDR_CTRL:
6517        if (arg3 || arg4 || arg5) {
6518            return -TARGET_EINVAL;
6519        }
6520        return do_prctl_set_tagged_addr_ctrl(env, arg2);
6521    case PR_GET_TAGGED_ADDR_CTRL:
6522        if (arg2 || arg3 || arg4 || arg5) {
6523            return -TARGET_EINVAL;
6524        }
6525        return do_prctl_get_tagged_addr_ctrl(env);
6526
6527    case PR_GET_UNALIGN:
6528        return do_prctl_get_unalign(env, arg2);
6529    case PR_SET_UNALIGN:
6530        return do_prctl_set_unalign(env, arg2);
6531
6532    case PR_CAP_AMBIENT:
6533    case PR_CAPBSET_READ:
6534    case PR_CAPBSET_DROP:
6535    case PR_GET_DUMPABLE:
6536    case PR_SET_DUMPABLE:
6537    case PR_GET_KEEPCAPS:
6538    case PR_SET_KEEPCAPS:
6539    case PR_GET_SECUREBITS:
6540    case PR_SET_SECUREBITS:
6541    case PR_GET_TIMING:
6542    case PR_SET_TIMING:
6543    case PR_GET_TIMERSLACK:
6544    case PR_SET_TIMERSLACK:
6545    case PR_MCE_KILL:
6546    case PR_MCE_KILL_GET:
6547    case PR_GET_NO_NEW_PRIVS:
6548    case PR_SET_NO_NEW_PRIVS:
6549    case PR_GET_IO_FLUSHER:
6550    case PR_SET_IO_FLUSHER:
6551        /* These options take no pointer arguments, so pass them straight through. */
6552        return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6553
6554    case PR_GET_CHILD_SUBREAPER:
6555    case PR_SET_CHILD_SUBREAPER:
6556    case PR_GET_SPECULATION_CTRL:
6557    case PR_SET_SPECULATION_CTRL:
6558    case PR_GET_TID_ADDRESS:
6559        /* TODO */
6560        return -TARGET_EINVAL;
6561
6562    case PR_GET_FPEXC:
6563    case PR_SET_FPEXC:
6564        /* Was used for SPE on PowerPC. */
6565        return -TARGET_EINVAL;
6566
6567    case PR_GET_ENDIAN:
6568    case PR_SET_ENDIAN:
6569    case PR_GET_FPEMU:
6570    case PR_SET_FPEMU:
6571    case PR_SET_MM:
6572    case PR_GET_SECCOMP:
6573    case PR_SET_SECCOMP:
6574    case PR_SET_SYSCALL_USER_DISPATCH:
6575    case PR_GET_THP_DISABLE:
6576    case PR_SET_THP_DISABLE:
6577    case PR_GET_TSC:
6578    case PR_SET_TSC:
6579        /* Refuse these so the guest cannot disable things QEMU itself needs. */
6580        return -TARGET_EINVAL;
6581
6582    default:
6583        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6584                      option);
6585        return -TARGET_EINVAL;
6586    }
6587}
6588
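/* Stack size for host threads backing guest clone(CLONE_VM) children. */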
6589#define NEW_STACK_SIZE 0x40000
6590
6591
6592static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6593typedef struct {
6594    CPUArchState *env;
6595    pthread_mutex_t mutex;
6596    pthread_cond_t cond;
6597    pthread_t thread;
6598    uint32_t tid;
6599    abi_ulong child_tidptr;
6600    abi_ulong parent_tidptr;
6601    sigset_t sigmask;
6602} new_thread_info;
6603
6604static void *clone_func(void *arg)
6605{
6606    new_thread_info *info = arg;
6607    CPUArchState *env;
6608    CPUState *cpu;
6609    TaskState *ts;
6610
6611    rcu_register_thread();
6612    tcg_register_thread();
6613    env = info->env;
6614    cpu = env_cpu(env);
6615    thread_cpu = cpu;
6616    ts = (TaskState *)cpu->opaque;
6617    info->tid = sys_gettid();
6618    task_settid(ts);
6619    if (info->child_tidptr)
6620        put_user_u32(info->tid, info->child_tidptr);
6621    if (info->parent_tidptr)
6622        put_user_u32(info->tid, info->parent_tidptr);
6623    qemu_guest_random_seed_thread_part2(cpu->random_seed);
6624    /* Enable signals.  */
6625    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6626    /* Signal to the parent that we're ready.  */
6627    pthread_mutex_lock(&info->mutex);
6628    pthread_cond_broadcast(&info->cond);
6629    pthread_mutex_unlock(&info->mutex);
6630    /* Wait until the parent has finished initializing the tls state.  */
6631    pthread_mutex_lock(&clone_lock);
6632    pthread_mutex_unlock(&clone_lock);
6633    cpu_loop(env);
6634    /* never exits */
6635    return NULL;
6636}
6637
6638/* do_fork() must return host values and target errnos (unlike most
6639   other do_*() functions). */
6640static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6641                   abi_ulong parent_tidptr, target_ulong newtls,
6642                   abi_ulong child_tidptr)
6643{
6644    CPUState *cpu = env_cpu(env);
6645    int ret;
6646    TaskState *ts;
6647    CPUState *new_cpu;
6648    CPUArchState *new_env;
6649    sigset_t sigmask;
6650
6651    flags &= ~CLONE_IGNORED_FLAGS;
6652
6653    /* Emulate vfork() with fork() */
6654    if (flags & CLONE_VFORK)
6655        flags &= ~(CLONE_VFORK | CLONE_VM);
6656
6657    if (flags & CLONE_VM) {
6658        TaskState *parent_ts = (TaskState *)cpu->opaque;
6659        new_thread_info info;
6660        pthread_attr_t attr;
6661
6662        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6663            (flags & CLONE_INVALID_THREAD_FLAGS)) {
6664            return -TARGET_EINVAL;
6665        }
6666
6667        ts = g_new0(TaskState, 1);
6668        init_task_state(ts);
6669
6670        /* Grab a mutex so that thread setup appears atomic.  */
6671        pthread_mutex_lock(&clone_lock);
6672
6673        /*
6674         * If this is our first additional thread, we need to ensure we
6675         * generate code for parallel execution and flush old translations.
6676         * Do this now so that the copy gets CF_PARALLEL too.
6677         */
6678        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6679            cpu->tcg_cflags |= CF_PARALLEL;
6680            tb_flush(cpu);
6681        }
6682
6683        /* we create a new CPU instance. */
6684        new_env = cpu_copy(env);
6685        /* Init regs that differ from the parent.  */
6686        cpu_clone_regs_child(new_env, newsp, flags);
6687        cpu_clone_regs_parent(env, flags);
6688        new_cpu = env_cpu(new_env);
6689        new_cpu->opaque = ts;
6690        ts->bprm = parent_ts->bprm;
6691        ts->info = parent_ts->info;
6692        ts->signal_mask = parent_ts->signal_mask;
6693
6694        if (flags & CLONE_CHILD_CLEARTID) {
6695            ts->child_tidptr = child_tidptr;
6696        }
6697
6698        if (flags & CLONE_SETTLS) {
6699            cpu_set_tls (new_env, newtls);
6700        }
6701
6702        memset(&info, 0, sizeof(info));
6703        pthread_mutex_init(&info.mutex, NULL);
6704        pthread_mutex_lock(&info.mutex);
6705        pthread_cond_init(&info.cond, NULL);
6706        info.env = new_env;
6707        if (flags & CLONE_CHILD_SETTID) {
6708            info.child_tidptr = child_tidptr;
6709        }
6710        if (flags & CLONE_PARENT_SETTID) {
6711            info.parent_tidptr = parent_tidptr;
6712        }
6713
6714        ret = pthread_attr_init(&attr);
6715        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6716        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6717        /* It is not safe to deliver signals until the child has finished
6718           initializing, so temporarily block all signals.  */
6719        sigfillset(&sigmask);
6720        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6721        cpu->random_seed = qemu_guest_random_seed_thread_part1();
6722
6723        ret = pthread_create(&info.thread, &attr, clone_func, &info);
6724        /* TODO: Free new CPU state if thread creation failed.  */
6725
6726        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6727        pthread_attr_destroy(&attr);
6728        if (ret == 0) {
6729            /* Wait for the child to initialize.  */
6730            pthread_cond_wait(&info.cond, &info.mutex);
6731            ret = info.tid;
6732        } else {
6733            ret = -1;
6734        }
6735        pthread_mutex_unlock(&info.mutex);
6736        pthread_cond_destroy(&info.cond);
6737        pthread_mutex_destroy(&info.mutex);
6738        pthread_mutex_unlock(&clone_lock);
6739    } else {
6740        /* without CLONE_VM, we treat this clone as a fork */
6741        if (flags & CLONE_INVALID_FORK_FLAGS) {
6742            return -TARGET_EINVAL;
6743        }
6744
6745        /* We can't support custom termination signals */
6746        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6747            return -TARGET_EINVAL;
6748        }
6749
6750        if (block_signals()) {
6751            return -QEMU_ERESTARTSYS;
6752        }
6753
6754        fork_start();
6755        ret = fork();
6756        if (ret == 0) {
6757            /* Child Process.  */
6758            cpu_clone_regs_child(env, newsp, flags);
6759            fork_end(1);
6760            /* There is a race condition here.  The parent process could
6761               theoretically read the TID in the child process before the child
6762               tid is set.  This would require using either ptrace
6763               (not implemented) or having *_tidptr point at a shared memory
6764               mapping.  We can't repeat the spinlock hack used above because
6765               the child process gets its own copy of the lock.  */
6766            if (flags & CLONE_CHILD_SETTID)
6767                put_user_u32(sys_gettid(), child_tidptr);
6768            if (flags & CLONE_PARENT_SETTID)
6769                put_user_u32(sys_gettid(), parent_tidptr);
6770            ts = (TaskState *)cpu->opaque;
6771            if (flags & CLONE_SETTLS)
6772                cpu_set_tls (env, newtls);
6773            if (flags & CLONE_CHILD_CLEARTID)
6774                ts->child_tidptr = child_tidptr;
6775        } else {
6776            cpu_clone_regs_parent(env, flags);
6777            fork_end(0);
6778        }
6779    }
6780    return ret;
6781}
6782
6783/* Warning: does not handle Linux-specific flags... */
6784static int target_to_host_fcntl_cmd(int cmd)
6785{
6786    int ret;
6787
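    /*
     * Plain record-lock commands are mapped to the host's 64-bit
     * variants, since do_fcntl() converts all lock structures through
     * struct flock64.
     */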
6788    switch(cmd) {
6789    case TARGET_F_DUPFD:
6790    case TARGET_F_GETFD:
6791    case TARGET_F_SETFD:
6792    case TARGET_F_GETFL:
6793    case TARGET_F_SETFL:
6794    case TARGET_F_OFD_GETLK:
6795    case TARGET_F_OFD_SETLK:
6796    case TARGET_F_OFD_SETLKW:
6797        ret = cmd;
6798        break;
6799    case TARGET_F_GETLK:
6800        ret = F_GETLK64;
6801        break;
6802    case TARGET_F_SETLK:
6803        ret = F_SETLK64;
6804        break;
6805    case TARGET_F_SETLKW:
6806        ret = F_SETLKW64;
6807        break;
6808    case TARGET_F_GETOWN:
6809        ret = F_GETOWN;
6810        break;
6811    case TARGET_F_SETOWN:
6812        ret = F_SETOWN;
6813        break;
6814    case TARGET_F_GETSIG:
6815        ret = F_GETSIG;
6816        break;
6817    case TARGET_F_SETSIG:
6818        ret = F_SETSIG;
6819        break;
6820#if TARGET_ABI_BITS == 32
6821    case TARGET_F_GETLK64:
6822        ret = F_GETLK64;
6823        break;
6824    case TARGET_F_SETLK64:
6825        ret = F_SETLK64;
6826        break;
6827    case TARGET_F_SETLKW64:
6828        ret = F_SETLKW64;
6829        break;
6830#endif
6831    case TARGET_F_SETLEASE:
6832        ret = F_SETLEASE;
6833        break;
6834    case TARGET_F_GETLEASE:
6835        ret = F_GETLEASE;
6836        break;
6837#ifdef F_DUPFD_CLOEXEC
6838    case TARGET_F_DUPFD_CLOEXEC:
6839        ret = F_DUPFD_CLOEXEC;
6840        break;
6841#endif
6842    case TARGET_F_NOTIFY:
6843        ret = F_NOTIFY;
6844        break;
6845#ifdef F_GETOWN_EX
6846    case TARGET_F_GETOWN_EX:
6847        ret = F_GETOWN_EX;
6848        break;
6849#endif
6850#ifdef F_SETOWN_EX
6851    case TARGET_F_SETOWN_EX:
6852        ret = F_SETOWN_EX;
6853        break;
6854#endif
6855#ifdef F_SETPIPE_SZ
6856    case TARGET_F_SETPIPE_SZ:
6857        ret = F_SETPIPE_SZ;
6858        break;
6859    case TARGET_F_GETPIPE_SZ:
6860        ret = F_GETPIPE_SZ;
6861        break;
6862#endif
6863#ifdef F_ADD_SEALS
6864    case TARGET_F_ADD_SEALS:
6865        ret = F_ADD_SEALS;
6866        break;
6867    case TARGET_F_GET_SEALS:
6868        ret = F_GET_SEALS;
6869        break;
6870#endif
6871    default:
6872        ret = -TARGET_EINVAL;
6873        break;
6874    }
6875
6876#if defined(__powerpc64__)
6877    /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6878     * which are not supported by the kernel. The glibc fcntl wrapper
6879     * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6880     * the syscall directly, adjust to what the kernel supports.
6881     */
6882    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6883        ret -= F_GETLK64 - 5;
6884    }
6885#endif
6886
6887    return ret;
6888}
6889
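/*
 * Map lock types in either direction with a single table: each caller
 * redefines TRANSTBL_CONVERT before expanding FLOCK_TRANSTBL, so the
 * same switch body serves both target-to-host and host-to-target.
 */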
6890#define FLOCK_TRANSTBL \
6891    switch (type) { \
6892    TRANSTBL_CONVERT(F_RDLCK); \
6893    TRANSTBL_CONVERT(F_WRLCK); \
6894    TRANSTBL_CONVERT(F_UNLCK); \
6895    }
6896
6897static int target_to_host_flock(int type)
6898{
6899#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6900    FLOCK_TRANSTBL
6901#undef  TRANSTBL_CONVERT
6902    return -TARGET_EINVAL;
6903}
6904
6905static int host_to_target_flock(int type)
6906{
6907#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6908    FLOCK_TRANSTBL
6909#undef  TRANSTBL_CONVERT
6910    /* if we don't know how to convert the value coming
6911     * from the host, copy it to the target field as-is
6912     */
6913    return type;
6914}
6915
6916static inline abi_long copy_from_user_flock(struct flock64 *fl,
6917                                            abi_ulong target_flock_addr)
6918{
6919    struct target_flock *target_fl;
6920    int l_type;
6921
6922    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6923        return -TARGET_EFAULT;
6924    }
6925
6926    __get_user(l_type, &target_fl->l_type);
6927    l_type = target_to_host_flock(l_type);
6928    if (l_type < 0) {
6929        return l_type;
6930    }
6931    fl->l_type = l_type;
6932    __get_user(fl->l_whence, &target_fl->l_whence);
6933    __get_user(fl->l_start, &target_fl->l_start);
6934    __get_user(fl->l_len, &target_fl->l_len);
6935    __get_user(fl->l_pid, &target_fl->l_pid);
6936    unlock_user_struct(target_fl, target_flock_addr, 0);
6937    return 0;
6938}
6939
6940static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6941                                          const struct flock64 *fl)
6942{
6943    struct target_flock *target_fl;
6944    short l_type;
6945
6946    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6947        return -TARGET_EFAULT;
6948    }
6949
6950    l_type = host_to_target_flock(fl->l_type);
6951    __put_user(l_type, &target_fl->l_type);
6952    __put_user(fl->l_whence, &target_fl->l_whence);
6953    __put_user(fl->l_start, &target_fl->l_start);
6954    __put_user(fl->l_len, &target_fl->l_len);
6955    __put_user(fl->l_pid, &target_fl->l_pid);
6956    unlock_user_struct(target_fl, target_flock_addr, 1);
6957    return 0;
6958}
6959
6960typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6961typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6962
6963#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6964struct target_oabi_flock64 {
6965    abi_short l_type;
6966    abi_short l_whence;
6967    abi_llong l_start;
6968    abi_llong l_len;
6969    abi_int   l_pid;
6970} QEMU_PACKED;
6971
6972static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6973                                                   abi_ulong target_flock_addr)
6974{
6975    struct target_oabi_flock64 *target_fl;
6976    int l_type;
6977
6978    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6979        return -TARGET_EFAULT;
6980    }
6981
6982    __get_user(l_type, &target_fl->l_type);
6983    l_type = target_to_host_flock(l_type);
6984    if (l_type < 0) {
6985        return l_type;
6986    }
6987    fl->l_type = l_type;
6988    __get_user(fl->l_whence, &target_fl->l_whence);
6989    __get_user(fl->l_start, &target_fl->l_start);
6990    __get_user(fl->l_len, &target_fl->l_len);
6991    __get_user(fl->l_pid, &target_fl->l_pid);
6992    unlock_user_struct(target_fl, target_flock_addr, 0);
6993    return 0;
6994}
6995
6996static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6997                                                 const struct flock64 *fl)
6998{
6999    struct target_oabi_flock64 *target_fl;
7000    short l_type;
7001
7002    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7003        return -TARGET_EFAULT;
7004    }
7005
7006    l_type = host_to_target_flock(fl->l_type);
7007    __put_user(l_type, &target_fl->l_type);
7008    __put_user(fl->l_whence, &target_fl->l_whence);
7009    __put_user(fl->l_start, &target_fl->l_start);
7010    __put_user(fl->l_len, &target_fl->l_len);
7011    __put_user(fl->l_pid, &target_fl->l_pid);
7012    unlock_user_struct(target_fl, target_flock_addr, 1);
7013    return 0;
7014}
7015#endif
7016
7017static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7018                                              abi_ulong target_flock_addr)
7019{
7020    struct target_flock64 *target_fl;
7021    int l_type;
7022
7023    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7024        return -TARGET_EFAULT;
7025    }
7026
7027    __get_user(l_type, &target_fl->l_type);
7028    l_type = target_to_host_flock(l_type);
7029    if (l_type < 0) {
7030        return l_type;
7031    }
7032    fl->l_type = l_type;
7033    __get_user(fl->l_whence, &target_fl->l_whence);
7034    __get_user(fl->l_start, &target_fl->l_start);
7035    __get_user(fl->l_len, &target_fl->l_len);
7036    __get_user(fl->l_pid, &target_fl->l_pid);
7037    unlock_user_struct(target_fl, target_flock_addr, 0);
7038    return 0;
7039}
7040
7041static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7042                                            const struct flock64 *fl)
7043{
7044    struct target_flock64 *target_fl;
7045    short l_type;
7046
7047    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7048        return -TARGET_EFAULT;
7049    }
7050
7051    l_type = host_to_target_flock(fl->l_type);
7052    __put_user(l_type, &target_fl->l_type);
7053    __put_user(fl->l_whence, &target_fl->l_whence);
7054    __put_user(fl->l_start, &target_fl->l_start);
7055    __put_user(fl->l_len, &target_fl->l_len);
7056    __put_user(fl->l_pid, &target_fl->l_pid);
7057    unlock_user_struct(target_fl, target_flock_addr, 1);
7058    return 0;
7059}
7060
7061static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7062{
7063    struct flock64 fl64;
7064#ifdef F_GETOWN_EX
7065    struct f_owner_ex fox;
7066    struct target_f_owner_ex *target_fox;
7067#endif
7068    abi_long ret;
7069    int host_cmd = target_to_host_fcntl_cmd(cmd);
7070
7071    if (host_cmd == -TARGET_EINVAL)
7072        return host_cmd;
7073
7074    switch(cmd) {
7075    case TARGET_F_GETLK:
7076        ret = copy_from_user_flock(&fl64, arg);
7077        if (ret) {
7078            return ret;
7079        }
7080        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7081        if (ret == 0) {
7082            ret = copy_to_user_flock(arg, &fl64);
7083        }
7084        break;
7085
7086    case TARGET_F_SETLK:
7087    case TARGET_F_SETLKW:
7088        ret = copy_from_user_flock(&fl64, arg);
7089        if (ret) {
7090            return ret;
7091        }
7092        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7093        break;
7094
7095    case TARGET_F_GETLK64:
7096    case TARGET_F_OFD_GETLK:
7097        ret = copy_from_user_flock64(&fl64, arg);
7098        if (ret) {
7099            return ret;
7100        }
7101        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7102        if (ret == 0) {
7103            ret = copy_to_user_flock64(arg, &fl64);
7104        }
7105        break;
7106    case TARGET_F_SETLK64:
7107    case TARGET_F_SETLKW64:
7108    case TARGET_F_OFD_SETLK:
7109    case TARGET_F_OFD_SETLKW:
7110        ret = copy_from_user_flock64(&fl64, arg);
7111        if (ret) {
7112            return ret;
7113        }
7114        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7115        break;
7116
7117    case TARGET_F_GETFL:
7118        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7119        if (ret >= 0) {
7120            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7121        }
7122        break;
7123
7124    case TARGET_F_SETFL:
7125        ret = get_errno(safe_fcntl(fd, host_cmd,
7126                                   target_to_host_bitmask(arg,
7127                                                          fcntl_flags_tbl)));
7128        break;
7129
7130#ifdef F_GETOWN_EX
7131    case TARGET_F_GETOWN_EX:
7132        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7133        if (ret >= 0) {
7134            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7135                return -TARGET_EFAULT;
7136            target_fox->type = tswap32(fox.type);
7137            target_fox->pid = tswap32(fox.pid);
7138            unlock_user_struct(target_fox, arg, 1);
7139        }
7140        break;
7141#endif
7142
7143#ifdef F_SETOWN_EX
7144    case TARGET_F_SETOWN_EX:
7145        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7146            return -TARGET_EFAULT;
7147        fox.type = tswap32(target_fox->type);
7148        fox.pid = tswap32(target_fox->pid);
7149        unlock_user_struct(target_fox, arg, 0);
7150        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7151        break;
7152#endif
7153
7154    case TARGET_F_SETSIG:
7155        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7156        break;
7157
7158    case TARGET_F_GETSIG:
7159        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7160        break;
7161
7162    case TARGET_F_SETOWN:
7163    case TARGET_F_GETOWN:
7164    case TARGET_F_SETLEASE:
7165    case TARGET_F_GETLEASE:
7166    case TARGET_F_SETPIPE_SZ:
7167    case TARGET_F_GETPIPE_SZ:
7168    case TARGET_F_ADD_SEALS:
7169    case TARGET_F_GET_SEALS:
7170        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7171        break;
7172
7173    default:
7174        ret = get_errno(safe_fcntl(fd, cmd, arg));
7175        break;
7176    }
7177    return ret;
7178}
7179
7180#ifdef USE_UID16
7181
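/*
 * For targets using 16-bit UID/GID syscalls: IDs above 65535 cannot be
 * represented, so narrow them to 65534 (the overflow ID), and treat a
 * 16-bit -1 as the "unchanged" value when widening.
 */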
7182static inline int high2lowuid(int uid)
7183{
7184    if (uid > 65535)
7185        return 65534;
7186    else
7187        return uid;
7188}
7189
7190static inline int high2lowgid(int gid)
7191{
7192    if (gid > 65535)
7193        return 65534;
7194    else
7195        return gid;
7196}
7197
7198static inline int low2highuid(int uid)
7199{
7200    if ((int16_t)uid == -1)
7201        return -1;
7202    else
7203        return uid;
7204}
7205
7206static inline int low2highgid(int gid)
7207{
7208    if ((int16_t)gid == -1)
7209        return -1;
7210    else
7211        return gid;
7212}
7213static inline int tswapid(int id)
7214{
7215    return tswap16(id);
7216}
7217
7218#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7219
7220#else /* !USE_UID16 */
7221static inline int high2lowuid(int uid)
7222{
7223    return uid;
7224}
7225static inline int high2lowgid(int gid)
7226{
7227    return gid;
7228}
7229static inline int low2highuid(int uid)
7230{
7231    return uid;
7232}
7233static inline int low2highgid(int gid)
7234{
7235    return gid;
7236}
7237static inline int tswapid(int id)
7238{
7239    return tswap32(id);
7240}
7241
7242#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7243
7244#endif /* USE_UID16 */
7245
7246/* We must do direct syscalls for setting UID/GID, because we want to
7247 * implement the Linux system call semantics of "change only for this thread",
7248 * not the libc/POSIX semantics of "change for all threads in process".
7249 * (See http://ewontfix.com/17/ for more details.)
7250 * We use the 32-bit version of the syscalls if present; if it is not
7251 * then either the host architecture supports 32-bit UIDs natively with
7252 * the standard syscall, or the 16-bit UID is the best we can do.
7253 */
7254#ifdef __NR_setuid32
7255#define __NR_sys_setuid __NR_setuid32
7256#else
7257#define __NR_sys_setuid __NR_setuid
7258#endif
7259#ifdef __NR_setgid32
7260#define __NR_sys_setgid __NR_setgid32
7261#else
7262#define __NR_sys_setgid __NR_setgid
7263#endif
7264#ifdef __NR_setresuid32
7265#define __NR_sys_setresuid __NR_setresuid32
7266#else
7267#define __NR_sys_setresuid __NR_setresuid
7268#endif
7269#ifdef __NR_setresgid32
7270#define __NR_sys_setresgid __NR_setresgid32
7271#else
7272#define __NR_sys_setresgid __NR_setresgid
7273#endif
7274
7275_syscall1(int, sys_setuid, uid_t, uid)
7276_syscall1(int, sys_setgid, gid_t, gid)
7277_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7278_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7279
7280void syscall_init(void)
7281{
7282    IOCTLEntry *ie;
7283    const argtype *arg_type;
7284    int size;
7285
7286    thunk_init(STRUCT_MAX);
7287
7288#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7289#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7290#include "syscall_types.h"
7291#undef STRUCT
7292#undef STRUCT_SPECIAL
7293
7294    /* We patch the ioctl size if necessary. We rely on the fact that
7295       no ioctl has all bits set to '1' in the size field. */
7296    ie = ioctl_entries;
7297    while (ie->target_cmd != 0) {
7298        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7299            TARGET_IOC_SIZEMASK) {
7300            arg_type = ie->arg_type;
7301            if (arg_type[0] != TYPE_PTR) {
7302                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7303                        ie->target_cmd);
7304                exit(1);
7305            }
7306            arg_type++;
7307            size = thunk_type_size(arg_type, 0);
7308            ie->target_cmd = (ie->target_cmd &
7309                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7310                (size << TARGET_IOC_SIZESHIFT);
7311        }
7312
7313        /* automatic consistency check if same arch */
7314#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7315    (defined(__x86_64__) && defined(TARGET_X86_64))
7316        if (unlikely(ie->target_cmd != ie->host_cmd)) {
7317            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7318                    ie->name, ie->target_cmd, ie->host_cmd);
7319        }
7320#endif
7321        ie++;
7322    }
7323}
7324
7325#ifdef TARGET_NR_truncate64
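/*
 * On ABIs where 64-bit syscall arguments must start in an even register
 * pair, a padding argument is inserted, so the offset halves arrive one
 * slot later (arg3/arg4 instead of arg2/arg3).
 */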
7326static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7327                                         abi_long arg2,
7328                                         abi_long arg3,
7329                                         abi_long arg4)
7330{
7331    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7332        arg2 = arg3;
7333        arg3 = arg4;
7334    }
7335    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7336}
7337#endif
7338
7339#ifdef TARGET_NR_ftruncate64
7340static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7341                                          abi_long arg2,
7342                                          abi_long arg3,
7343                                          abi_long arg4)
7344{
7345    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7346        arg2 = arg3;
7347        arg3 = arg4;
7348    }
7349    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7350}
7351#endif
7352
7353#if defined(TARGET_NR_timer_settime) || \
7354    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7355static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7356                                                 abi_ulong target_addr)
7357{
7358    if (target_to_host_timespec(&host_its->it_interval, target_addr +
7359                                offsetof(struct target_itimerspec,
7360                                         it_interval)) ||
7361        target_to_host_timespec(&host_its->it_value, target_addr +
7362                                offsetof(struct target_itimerspec,
7363                                         it_value))) {
7364        return -TARGET_EFAULT;
7365    }
7366
7367    return 0;
7368}
7369#endif
7370
7371#if defined(TARGET_NR_timer_settime64) || \
7372    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7373static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7374                                                   abi_ulong target_addr)
7375{
7376    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7377                                  offsetof(struct target__kernel_itimerspec,
7378                                           it_interval)) ||
7379        target_to_host_timespec64(&host_its->it_value, target_addr +
7380                                  offsetof(struct target__kernel_itimerspec,
7381                                           it_value))) {
7382        return -TARGET_EFAULT;
7383    }
7384
7385    return 0;
7386}
7387#endif
7388
7389#if ((defined(TARGET_NR_timerfd_gettime) || \
7390      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7391      defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7392static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7393                                                 struct itimerspec *host_its)
7394{
7395    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7396                                                       it_interval),
7397                                &host_its->it_interval) ||
7398        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7399                                                       it_value),
7400                                &host_its->it_value)) {
7401        return -TARGET_EFAULT;
7402    }
7403    return 0;
7404}
7405#endif
7406
7407#if ((defined(TARGET_NR_timerfd_gettime64) || \
7408      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7409      defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7410static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7411                                                   struct itimerspec *host_its)
7412{
7413    if (host_to_target_timespec64(target_addr +
7414                                  offsetof(struct target__kernel_itimerspec,
7415                                           it_interval),
7416                                  &host_its->it_interval) ||
7417        host_to_target_timespec64(target_addr +
7418                                  offsetof(struct target__kernel_itimerspec,
7419                                           it_value),
7420                                  &host_its->it_value)) {
7421        return -TARGET_EFAULT;
7422    }
7423    return 0;
7424}
7425#endif
7426
7427#if defined(TARGET_NR_adjtimex) || \
7428    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7429static inline abi_long target_to_host_timex(struct timex *host_tx,
7430                                            abi_long target_addr)
7431{
7432    struct target_timex *target_tx;
7433
7434    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7435        return -TARGET_EFAULT;
7436    }
7437
7438    __get_user(host_tx->modes, &target_tx->modes);
7439    __get_user(host_tx->offset, &target_tx->offset);
7440    __get_user(host_tx->freq, &target_tx->freq);
7441    __get_user(host_tx->maxerror, &target_tx->maxerror);
7442    __get_user(host_tx->esterror, &target_tx->esterror);
7443    __get_user(host_tx->status, &target_tx->status);
7444    __get_user(host_tx->constant, &target_tx->constant);
7445    __get_user(host_tx->precision, &target_tx->precision);
7446    __get_user(host_tx->tolerance, &target_tx->tolerance);
7447    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7448    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7449    __get_user(host_tx->tick, &target_tx->tick);
7450    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7451    __get_user(host_tx->jitter, &target_tx->jitter);
7452    __get_user(host_tx->shift, &target_tx->shift);
7453    __get_user(host_tx->stabil, &target_tx->stabil);
7454    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7455    __get_user(host_tx->calcnt, &target_tx->calcnt);
7456    __get_user(host_tx->errcnt, &target_tx->errcnt);
7457    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7458    __get_user(host_tx->tai, &target_tx->tai);
7459
7460    unlock_user_struct(target_tx, target_addr, 0);
7461    return 0;
7462}
7463
7464static inline abi_long host_to_target_timex(abi_long target_addr,
7465                                            struct timex *host_tx)
7466{
7467    struct target_timex *target_tx;
7468
7469    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7470        return -TARGET_EFAULT;
7471    }
7472
7473    __put_user(host_tx->modes, &target_tx->modes);
7474    __put_user(host_tx->offset, &target_tx->offset);
7475    __put_user(host_tx->freq, &target_tx->freq);
7476    __put_user(host_tx->maxerror, &target_tx->maxerror);
7477    __put_user(host_tx->esterror, &target_tx->esterror);
7478    __put_user(host_tx->status, &target_tx->status);
7479    __put_user(host_tx->constant, &target_tx->constant);
7480    __put_user(host_tx->precision, &target_tx->precision);
7481    __put_user(host_tx->tolerance, &target_tx->tolerance);
7482    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7483    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7484    __put_user(host_tx->tick, &target_tx->tick);
7485    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7486    __put_user(host_tx->jitter, &target_tx->jitter);
7487    __put_user(host_tx->shift, &target_tx->shift);
7488    __put_user(host_tx->stabil, &target_tx->stabil);
7489    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7490    __put_user(host_tx->calcnt, &target_tx->calcnt);
7491    __put_user(host_tx->errcnt, &target_tx->errcnt);
7492    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7493    __put_user(host_tx->tai, &target_tx->tai);
7494
7495    unlock_user_struct(target_tx, target_addr, 1);
7496    return 0;
7497}
7498#endif
7499
7500
7501#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7502static inline abi_long target_to_host_timex64(struct timex *host_tx,
7503                                              abi_long target_addr)
7504{
7505    struct target__kernel_timex *target_tx;
7506
7507    if (copy_from_user_timeval64(&host_tx->time, target_addr +
7508                                 offsetof(struct target__kernel_timex,
7509                                          time))) {
7510        return -TARGET_EFAULT;
7511    }
7512
7513    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7514        return -TARGET_EFAULT;
7515    }
7516
7517    __get_user(host_tx->modes, &target_tx->modes);
7518    __get_user(host_tx->offset, &target_tx->offset);
7519    __get_user(host_tx->freq, &target_tx->freq);
7520    __get_user(host_tx->maxerror, &target_tx->maxerror);
7521    __get_user(host_tx->esterror, &target_tx->esterror);
7522    __get_user(host_tx->status, &target_tx->status);
7523    __get_user(host_tx->constant, &target_tx->constant);
7524    __get_user(host_tx->precision, &target_tx->precision);
7525    __get_user(host_tx->tolerance, &target_tx->tolerance);
7526    __get_user(host_tx->tick, &target_tx->tick);
7527    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7528    __get_user(host_tx->jitter, &target_tx->jitter);
7529    __get_user(host_tx->shift, &target_tx->shift);
7530    __get_user(host_tx->stabil, &target_tx->stabil);
7531    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7532    __get_user(host_tx->calcnt, &target_tx->calcnt);
7533    __get_user(host_tx->errcnt, &target_tx->errcnt);
7534    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7535    __get_user(host_tx->tai, &target_tx->tai);
7536
7537    unlock_user_struct(target_tx, target_addr, 0);
7538    return 0;
7539}
7540
7541static inline abi_long host_to_target_timex64(abi_long target_addr,
7542                                              struct timex *host_tx)
7543{
7544    struct target__kernel_timex *target_tx;
7545
7546    if (copy_to_user_timeval64(target_addr +
7547                               offsetof(struct target__kernel_timex, time),
7548                               &host_tx->time)) {
7549        return -TARGET_EFAULT;
7550    }
7551
7552    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7553        return -TARGET_EFAULT;
7554    }
7555
7556    __put_user(host_tx->modes, &target_tx->modes);
7557    __put_user(host_tx->offset, &target_tx->offset);
7558    __put_user(host_tx->freq, &target_tx->freq);
7559    __put_user(host_tx->maxerror, &target_tx->maxerror);
7560    __put_user(host_tx->esterror, &target_tx->esterror);
7561    __put_user(host_tx->status, &target_tx->status);
7562    __put_user(host_tx->constant, &target_tx->constant);
7563    __put_user(host_tx->precision, &target_tx->precision);
7564    __put_user(host_tx->tolerance, &target_tx->tolerance);
7565    __put_user(host_tx->tick, &target_tx->tick);
7566    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7567    __put_user(host_tx->jitter, &target_tx->jitter);
7568    __put_user(host_tx->shift, &target_tx->shift);
7569    __put_user(host_tx->stabil, &target_tx->stabil);
7570    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7571    __put_user(host_tx->calcnt, &target_tx->calcnt);
7572    __put_user(host_tx->errcnt, &target_tx->errcnt);
7573    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7574    __put_user(host_tx->tai, &target_tx->tai);
7575
7576    unlock_user_struct(target_tx, target_addr, 1);
7577    return 0;
7578}
7579#endif
7580
7581#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7582#define sigev_notify_thread_id _sigev_un._tid
7583#endif
7584
7585static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7586                                               abi_ulong target_addr)
7587{
7588    struct target_sigevent *target_sevp;
7589
7590    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7591        return -TARGET_EFAULT;
7592    }
7593
7594    /* This union is awkward on 64 bit systems because it has a 32 bit
7595     * integer and a pointer in it; we follow the conversion approach
7596     * used for handling sigval types in signal.c so the guest should get
7597     * the correct value back even if we did a 64 bit byteswap and it's
7598     * using the 32 bit integer.
7599     */
7600    host_sevp->sigev_value.sival_ptr =
7601        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7602    host_sevp->sigev_signo =
7603        target_to_host_signal(tswap32(target_sevp->sigev_signo));
7604    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7605    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7606
7607    unlock_user_struct(target_sevp, target_addr, 1);
7608    return 0;
7609}
7610
7611#if defined(TARGET_NR_mlockall)
7612static inline int target_to_host_mlockall_arg(int arg)
7613{
7614    int result = 0;
7615
7616    if (arg & TARGET_MCL_CURRENT) {
7617        result |= MCL_CURRENT;
7618    }
7619    if (arg & TARGET_MCL_FUTURE) {
7620        result |= MCL_FUTURE;
7621    }
7622#ifdef MCL_ONFAULT
7623    if (arg & TARGET_MCL_ONFAULT) {
7624        result |= MCL_ONFAULT;
7625    }
7626#endif
7627
7628    return result;
7629}
7630#endif
7631
7632#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7633     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7634     defined(TARGET_NR_newfstatat))
7635static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7636                                             abi_ulong target_addr,
7637                                             struct stat *host_st)
7638{
7639#if defined(TARGET_ARM) && defined(TARGET_ABI32)
7640    if (cpu_env->eabi) {
7641        struct target_eabi_stat64 *target_st;
7642
7643        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7644            return -TARGET_EFAULT;
7645        memset(target_st, 0, sizeof(struct target_eabi_stat64));
7646        __put_user(host_st->st_dev, &target_st->st_dev);
7647        __put_user(host_st->st_ino, &target_st->st_ino);
7648#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7649        __put_user(host_st->st_ino, &target_st->__st_ino);
7650#endif
7651        __put_user(host_st->st_mode, &target_st->st_mode);
7652        __put_user(host_st->st_nlink, &target_st->st_nlink);
7653        __put_user(host_st->st_uid, &target_st->st_uid);
7654        __put_user(host_st->st_gid, &target_st->st_gid);
7655        __put_user(host_st->st_rdev, &target_st->st_rdev);
7656        __put_user(host_st->st_size, &target_st->st_size);
7657        __put_user(host_st->st_blksize, &target_st->st_blksize);
7658        __put_user(host_st->st_blocks, &target_st->st_blocks);
7659        __put_user(host_st->st_atime, &target_st->target_st_atime);
7660        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7661        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7662#ifdef HAVE_STRUCT_STAT_ST_ATIM
7663        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7664        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7665        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7666#endif
7667        unlock_user_struct(target_st, target_addr, 1);
7668    } else
7669#endif
7670    {
7671#if defined(TARGET_HAS_STRUCT_STAT64)
7672        struct target_stat64 *target_st;
7673#else
7674        struct target_stat *target_st;
7675#endif
7676
7677        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7678            return -TARGET_EFAULT;
7679        memset(target_st, 0, sizeof(*target_st));
7680        __put_user(host_st->st_dev, &target_st->st_dev);
7681        __put_user(host_st->st_ino, &target_st->st_ino);
7682#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7683        __put_user(host_st->st_ino, &target_st->__st_ino);
7684#endif
7685        __put_user(host_st->st_mode, &target_st->st_mode);
7686        __put_user(host_st->st_nlink, &target_st->st_nlink);
7687        __put_user(host_st->st_uid, &target_st->st_uid);
7688        __put_user(host_st->st_gid, &target_st->st_gid);
7689        __put_user(host_st->st_rdev, &target_st->st_rdev);
7690        /* XXX: better use of kernel struct */
7691        __put_user(host_st->st_size, &target_st->st_size);
7692        __put_user(host_st->st_blksize, &target_st->st_blksize);
7693        __put_user(host_st->st_blocks, &target_st->st_blocks);
7694        __put_user(host_st->st_atime, &target_st->target_st_atime);
7695        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7696        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7697#ifdef HAVE_STRUCT_STAT_ST_ATIM
7698        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7699        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7700        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7701#endif
7702        unlock_user_struct(target_st, target_addr, 1);
7703    }
7704
7705    return 0;
7706}
7707#endif
7708
7709#if defined(TARGET_NR_statx) && defined(__NR_statx)
7710static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7711                                            abi_ulong target_addr)
7712{
7713    struct target_statx *target_stx;
7714
7715    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7716        return -TARGET_EFAULT;
7717    }
7718    memset(target_stx, 0, sizeof(*target_stx));
7719
7720    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7721    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7722    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7723    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7724    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7725    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7726    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7727    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7728    __put_user(host_stx->stx_size, &target_stx->stx_size);
7729    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7730    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7731    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7732    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7733    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7734    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7735    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7736    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7737    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7738    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7739    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7740    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7741    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7742    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7743
7744    unlock_user_struct(target_stx, target_addr, 1);
7745
7746    return 0;
7747}
7748#endif
7749
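/*
 * Pick the host futex syscall variant: 64-bit hosts only have __NR_futex
 * (with a 64-bit time_t); on 32-bit hosts prefer __NR_futex_time64 when
 * the host timespec carries a 64-bit tv_sec, otherwise fall back to the
 * legacy __NR_futex.
 */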
7750static int do_sys_futex(int *uaddr, int op, int val,
7751                         const struct timespec *timeout, int *uaddr2,
7752                         int val3)
7753{
7754#if HOST_LONG_BITS == 64
7755#if defined(__NR_futex)
7756    /* time_t is always 64-bit here, so no _time64 variant is defined */
7757    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7758
7759#endif
7760#else /* HOST_LONG_BITS == 64 */
7761#if defined(__NR_futex_time64)
7762    if (sizeof(timeout->tv_sec) == 8) {
7763        /* _time64 function on 32bit arch */
7764        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7765    }
7766#endif
7767#if defined(__NR_futex)
7768    /* old function on 32bit arch */
7769    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7770#endif
7771#endif /* HOST_LONG_BITS == 64 */
7772    g_assert_not_reached();
7773}
7774
7775static int do_safe_futex(int *uaddr, int op, int val,
7776                         const struct timespec *timeout, int *uaddr2,
7777                         int val3)
7778{
7779#if HOST_LONG_BITS == 64
7780#if defined(__NR_futex)
7781    /* 64-bit hosts always have a 64-bit time_t; no _time64 variant is defined. */
7782    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7783#endif
7784#else /* HOST_LONG_BITS == 64 */
7785#if defined(__NR_futex_time64)
7786    if (sizeof(timeout->tv_sec) == 8) {
7787        /* _time64 function on 32bit arch */
7788        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7789                                           val3));
7790    }
7791#endif
7792#if defined(__NR_futex)
7793    /* old function on 32bit arch */
7794    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7795#endif
7796#endif /* HOST_LONG_BITS == 64 */
7797    return -TARGET_ENOSYS;
7798}
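
/*
 * To summarize the time_t handling above: on a 64-bit host the plain
 * __NR_futex syscall is always used, since time_t is already 64 bits wide
 * and no _time64 variant exists.  On a 32-bit host, if the libc's struct
 * timespec carries a 64-bit tv_sec and __NR_futex_time64 is available,
 * that syscall is used; otherwise the call falls back to the legacy
 * __NR_futex.  If neither syscall number is defined at build time,
 * do_safe_futex() reports -TARGET_ENOSYS (and do_sys_futex() asserts).
 */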
7799
7800/* ??? Using host futex calls even when target atomic operations
7801   are not really atomic probably breaks things.  However, implementing
7802   futexes locally would make futexes shared between multiple processes
7803   tricky, and they would probably be useless anyway because guest atomic
7804   operations won't work either.  */
7805#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7806static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7807                    int op, int val, target_ulong timeout,
7808                    target_ulong uaddr2, int val3)
7809{
7810    struct timespec ts, *pts = NULL;
7811    void *haddr2 = NULL;
7812    int base_op;
7813
7814    /* We assume FUTEX_* constants are the same on both host and target. */
7815#ifdef FUTEX_CMD_MASK
7816    base_op = op & FUTEX_CMD_MASK;
7817#else
7818    base_op = op;
7819#endif
7820    switch (base_op) {
7821    case FUTEX_WAIT:
7822    case FUTEX_WAIT_BITSET:
7823        val = tswap32(val);
7824        break;
7825    case FUTEX_WAIT_REQUEUE_PI:
7826        val = tswap32(val);
7827        haddr2 = g2h(cpu, uaddr2);
7828        break;
7829    case FUTEX_LOCK_PI:
7830    case FUTEX_LOCK_PI2:
7831        break;
7832    case FUTEX_WAKE:
7833    case FUTEX_WAKE_BITSET:
7834    case FUTEX_TRYLOCK_PI:
7835    case FUTEX_UNLOCK_PI:
7836        timeout = 0;
7837        break;
7838    case FUTEX_FD:
7839        val = target_to_host_signal(val);
7840        timeout = 0;
7841        break;
7842    case FUTEX_CMP_REQUEUE:
7843    case FUTEX_CMP_REQUEUE_PI:
7844        val3 = tswap32(val3);
7845        /* fall through */
7846    case FUTEX_REQUEUE:
7847    case FUTEX_WAKE_OP:
7848        /*
7849         * For these, the 4th argument is not TIMEOUT, but VAL2.
7850         * But the prototype of do_safe_futex takes a pointer, so
7851         * insert casts to satisfy the compiler.  We do not need
7852         * to tswap VAL2 since it's not compared to guest memory.
7853         */
7854        pts = (struct timespec *)(uintptr_t)timeout;
7855        timeout = 0;
7856        haddr2 = g2h(cpu, uaddr2);
7857        break;
7858    default:
7859        return -TARGET_ENOSYS;
7860    }
7861    if (timeout) {
7862        pts = &ts;
7863        if (time64
7864            ? target_to_host_timespec64(pts, timeout)
7865            : target_to_host_timespec(pts, timeout)) {
7866            return -TARGET_EFAULT;
7867        }
7868    }
7869    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7870}
7871#endif
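
/*
 * For illustration, a guest futex wait such as
 *
 *     futex(uaddr, FUTEX_WAIT, val, &ts, NULL, 0);
 *
 * arrives here with VAL in guest byte order, so it is tswap32()ed before
 * the host kernel compares it against the guest memory word at
 * g2h(cpu, uaddr), and the guest timespec is converted with
 * target_to_host_timespec() or target_to_host_timespec64() depending on
 * whether the guest ABI used the _time64 flavour of the syscall.
 */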
7872
7873#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7874static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7875                                     abi_long handle, abi_long mount_id,
7876                                     abi_long flags)
7877{
7878    struct file_handle *target_fh;
7879    struct file_handle *fh;
7880    int mid = 0;
7881    abi_long ret;
7882    char *name;
7883    unsigned int size, total_size;
7884
7885    if (get_user_s32(size, handle)) {
7886        return -TARGET_EFAULT;
7887    }
7888
7889    name = lock_user_string(pathname);
7890    if (!name) {
7891        return -TARGET_EFAULT;
7892    }
7893
7894    total_size = sizeof(struct file_handle) + size;
7895    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7896    if (!target_fh) {
7897        unlock_user(name, pathname, 0);
7898        return -TARGET_EFAULT;
7899    }
7900
7901    fh = g_malloc0(total_size);
7902    fh->handle_bytes = size;
7903
7904    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7905    unlock_user(name, pathname, 0);
7906
7907    /* man name_to_handle_at(2):
7908     * Other than the use of the handle_bytes field, the caller should treat
7909     * the file_handle structure as an opaque data type
7910     */
7911
7912    memcpy(target_fh, fh, total_size);
7913    target_fh->handle_bytes = tswap32(fh->handle_bytes);
7914    target_fh->handle_type = tswap32(fh->handle_type);
7915    g_free(fh);
7916    unlock_user(target_fh, handle, total_size);
7917
7918    if (put_user_s32(mid, mount_id)) {
7919        return -TARGET_EFAULT;
7920    }
7921
7922    return ret;
7923
7924}
7925#endif
7926
7927#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7928static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7929                                     abi_long flags)
7930{
7931    struct file_handle *target_fh;
7932    struct file_handle *fh;
7933    unsigned int size, total_size;
7934    abi_long ret;
7935
7936    if (get_user_s32(size, handle)) {
7937        return -TARGET_EFAULT;
7938    }
7939
7940    total_size = sizeof(struct file_handle) + size;
7941    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7942    if (!target_fh) {
7943        return -TARGET_EFAULT;
7944    }
7945
7946    fh = g_memdup(target_fh, total_size);
7947    fh->handle_bytes = size;
7948    fh->handle_type = tswap32(target_fh->handle_type);
7949
7950    ret = get_errno(open_by_handle_at(mount_fd, fh,
7951                    target_to_host_bitmask(flags, fcntl_flags_tbl)));
7952
7953    g_free(fh);
7954
7955    unlock_user(target_fh, handle, total_size);
7956
7957    return ret;
7958}
7959#endif
7960
7961#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7962
7963static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7964{
7965    int host_flags;
7966    target_sigset_t *target_mask;
7967    sigset_t host_mask;
7968    abi_long ret;
7969
7970    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7971        return -TARGET_EINVAL;
7972    }
7973    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7974        return -TARGET_EFAULT;
7975    }
7976
7977    target_to_host_sigset(&host_mask, target_mask);
7978
7979    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7980
7981    ret = get_errno(signalfd(fd, &host_mask, host_flags));
7982    if (ret >= 0) {
7983        fd_trans_register(ret, &target_signalfd_trans);
7984    }
7985
7986    unlock_user_struct(target_mask, mask, 0);
7987
7988    return ret;
7989}
7990#endif
7991
7992/* Map host to target signal numbers for the wait family of syscalls.
7993   Assume all other status bits are the same.  */
7994int host_to_target_waitstatus(int status)
7995{
7996    if (WIFSIGNALED(status)) {
7997        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7998    }
7999    if (WIFSTOPPED(status)) {
8000        return (host_to_target_signal(WSTOPSIG(status)) << 8)
8001               | (status & 0xff);
8002    }
8003    return status;
8004}
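
/*
 * For illustration: for a child terminated by a signal, the low 7 bits
 * (the host signal number) are replaced with the target signal number
 * while the core-dump bit and the rest of the status are preserved; for
 * a stopped child the signal lives in bits 8-15 and only that byte is
 * translated.  A plain exit status is passed through unchanged.
 */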
8005
8006static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8007{
8008    CPUState *cpu = env_cpu(cpu_env);
8009    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8010    int i;
8011
8012    for (i = 0; i < bprm->argc; i++) {
8013        size_t len = strlen(bprm->argv[i]) + 1;
8014
8015        if (write(fd, bprm->argv[i], len) != len) {
8016            return -1;
8017        }
8018    }
8019
8020    return 0;
8021}
8022
8023static int open_self_maps(CPUArchState *cpu_env, int fd)
8024{
8025    CPUState *cpu = env_cpu(cpu_env);
8026    TaskState *ts = cpu->opaque;
8027    GSList *map_info = read_self_maps();
8028    GSList *s;
8029    int count;
8030
8031    for (s = map_info; s; s = g_slist_next(s)) {
8032        MapInfo *e = (MapInfo *) s->data;
8033
8034        if (h2g_valid(e->start)) {
8035            unsigned long min = e->start;
8036            unsigned long max = e->end;
8037            int flags = page_get_flags(h2g(min));
8038            const char *path;
8039
8040            max = h2g_valid(max - 1) ?
8041                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8042
8043            if (page_check_range(h2g(min), max - min, flags) == -1) {
8044                continue;
8045            }
8046
8047#ifdef TARGET_HPPA
8048            if (h2g(max) == ts->info->stack_limit) {
8049#else
8050            if (h2g(min) == ts->info->stack_limit) {
8051#endif
8052                path = "[stack]";
8053            } else {
8054                path = e->path;
8055            }
8056
8057            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8058                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8059                            h2g(min), h2g(max - 1) + 1,
8060                            (flags & PAGE_READ) ? 'r' : '-',
8061                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8062                            (flags & PAGE_EXEC) ? 'x' : '-',
8063                            e->is_priv ? 'p' : 's',
8064                            (uint64_t) e->offset, e->dev, e->inode);
8065            if (path) {
8066                dprintf(fd, "%*s%s\n", 73 - count, "", path);
8067            } else {
8068                dprintf(fd, "\n");
8069            }
8070        }
8071    }
8072
8073    free_self_maps(map_info);
8074
8075#ifdef TARGET_VSYSCALL_PAGE
8076    /*
8077     * We only support execution from the vsyscall page.
8078     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8079     */
8080    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8081                    " --xp 00000000 00:00 0",
8082                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8083    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
8084#endif
8085
8086    return 0;
8087}
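
/*
 * The dprintf() above mimics the kernel's /proc/<pid>/maps format, e.g.
 * (with purely illustrative values):
 *
 *   00400000-00452000 r-xp 00010000 08:02 173521      /usr/bin/some-binary
 *
 * i.e. guest start/end addresses, permissions plus 'p'/'s' for a private
 * or shared mapping, file offset, device, inode, and finally the path
 * padded out to column 73.
 */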
8088
8089static int open_self_stat(CPUArchState *cpu_env, int fd)
8090{
8091    CPUState *cpu = env_cpu(cpu_env);
8092    TaskState *ts = cpu->opaque;
8093    g_autoptr(GString) buf = g_string_new(NULL);
8094    int i;
8095
8096    for (i = 0; i < 44; i++) {
8097        if (i == 0) {
8098            /* pid */
8099            g_string_printf(buf, FMT_pid " ", getpid());
8100        } else if (i == 1) {
8101            /* app name */
8102            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8103            bin = bin ? bin + 1 : ts->bprm->argv[0];
8104            g_string_printf(buf, "(%.15s) ", bin);
8105        } else if (i == 3) {
8106            /* ppid */
8107            g_string_printf(buf, FMT_pid " ", getppid());
8108        } else if (i == 21) {
8109            /* starttime */
8110            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8111        } else if (i == 27) {
8112            /* stack bottom */
8113            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8114        } else {
8115            /* all other fields are not emulated; report them as 0 */
8116            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8117        }
8118
8119        if (write(fd, buf->str, buf->len) != buf->len) {
8120            return -1;
8121        }
8122    }
8123
8124    return 0;
8125}
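
/*
 * Only a few /proc/self/stat fields are synthesized above: field 1 (pid),
 * field 2 (comm, truncated to 15 characters), field 4 (ppid), field 22
 * (starttime) and field 28 (startstack); the rest of the 44 emitted
 * fields read back as 0.
 */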
8126
8127static int open_self_auxv(CPUArchState *cpu_env, int fd)
8128{
8129    CPUState *cpu = env_cpu(cpu_env);
8130    TaskState *ts = cpu->opaque;
8131    abi_ulong auxv = ts->info->saved_auxv;
8132    abi_ulong len = ts->info->auxv_len;
8133    char *ptr;
8134
8135    /*
8136     * The auxiliary vector is stored on the target process stack.
8137     * Read the whole auxv vector and copy it to the file.
8138     */
8139    ptr = lock_user(VERIFY_READ, auxv, len, 0);
8140    if (ptr != NULL) {
8141        while (len > 0) {
8142            ssize_t r;
8143            r = write(fd, ptr, len);
8144            if (r <= 0) {
8145                break;
8146            }
8147            len -= r;
8148            ptr += r;
8149        }
8150        lseek(fd, 0, SEEK_SET);
8151        unlock_user(ptr, auxv, len);
8152    }
8153
8154    return 0;
8155}
8156
8157static int is_proc_myself(const char *filename, const char *entry)
8158{
8159    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8160        filename += strlen("/proc/");
8161        if (!strncmp(filename, "self/", strlen("self/"))) {
8162            filename += strlen("self/");
8163        } else if (*filename >= '1' && *filename <= '9') {
8164            char myself[80];
8165            snprintf(myself, sizeof(myself), "%d/", getpid());
8166            if (!strncmp(filename, myself, strlen(myself))) {
8167                filename += strlen(myself);
8168            } else {
8169                return 0;
8170            }
8171        } else {
8172            return 0;
8173        }
8174        if (!strcmp(filename, entry)) {
8175            return 1;
8176        }
8177    }
8178    return 0;
8179}
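
/*
 * For illustration, with a QEMU pid of 1234:
 *
 *   is_proc_myself("/proc/self/maps", "maps")  -> 1
 *   is_proc_myself("/proc/1234/maps", "maps")  -> 1
 *   is_proc_myself("/proc/4321/maps", "maps")  -> 0   (another process)
 *   is_proc_myself("/proc/version",   "maps")  -> 0
 */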
8180
8181static void excp_dump_file(FILE *logfile, CPUArchState *env,
8182                      const char *fmt, int code)
8183{
8184    if (logfile) {
8185        CPUState *cs = env_cpu(env);
8186
8187        fprintf(logfile, fmt, code);
8188        fprintf(logfile, "Failing executable: %s\n", exec_path);
8189        cpu_dump_state(cs, logfile, 0);
8190        open_self_maps(env, fileno(logfile));
8191    }
8192}
8193
8194void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8195{
8196    /* dump to console */
8197    excp_dump_file(stderr, env, fmt, code);
8198
8199    /* dump to log file */
8200    if (qemu_log_separate()) {
8201        FILE *logfile = qemu_log_trylock();
8202
8203        excp_dump_file(logfile, env, fmt, code);
8204        qemu_log_unlock(logfile);
8205    }
8206}
8207
8208#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8209    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8210static int is_proc(const char *filename, const char *entry)
8211{
8212    return strcmp(filename, entry) == 0;
8213}
8214#endif
8215
8216#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8217static int open_net_route(CPUArchState *cpu_env, int fd)
8218{
8219    FILE *fp;
8220    char *line = NULL;
8221    size_t len = 0;
8222    ssize_t read;
8223
8224    fp = fopen("/proc/net/route", "r");
8225    if (fp == NULL) {
8226        return -1;
8227    }
8228
8229    /* read header */
8230
8231    read = getline(&line, &len, fp);
8232    dprintf(fd, "%s", line);
8233
8234    /* read routes */
8235
8236    while ((read = getline(&line, &len, fp)) != -1) {
8237        char iface[16];
8238        uint32_t dest, gw, mask;
8239        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8240        int fields;
8241
8242        fields = sscanf(line,
8243                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8244                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8245                        &mask, &mtu, &window, &irtt);
8246        if (fields != 11) {
8247            continue;
8248        }
8249        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8250                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8251                metric, tswap32(mask), mtu, window, irtt);
8252    }
8253
8254    free(line);
8255    fclose(fp);
8256
8257    return 0;
8258}
8259#endif
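
/*
 * /proc/net/route prints addresses as raw hex in host byte order, so when
 * host and guest endianness differ the destination, gateway and netmask
 * columns are re-emitted above with tswap32() so that the guest parses
 * them in its own byte order; all other columns are copied through
 * unchanged.
 */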
8260
8261#if defined(TARGET_SPARC)
8262static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8263{
8264    dprintf(fd, "type\t\t: sun4u\n");
8265    return 0;
8266}
8267#endif
8268
8269#if defined(TARGET_HPPA)
8270static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8271{
8272    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8273    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8274    dprintf(fd, "capabilities\t: os32\n");
8275    dprintf(fd, "model\t\t: 9000/778/B160L\n");
8276    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8277    return 0;
8278}
8279#endif
8280
8281#if defined(TARGET_M68K)
8282static int open_hardware(CPUArchState *cpu_env, int fd)
8283{
8284    dprintf(fd, "Model:\t\tqemu-m68k\n");
8285    return 0;
8286}
8287#endif
8288
8289static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8290{
8291    struct fake_open {
8292        const char *filename;
8293        int (*fill)(CPUArchState *cpu_env, int fd);
8294        int (*cmp)(const char *s1, const char *s2);
8295    };
8296    const struct fake_open *fake_open;
8297    static const struct fake_open fakes[] = {
8298        { "maps", open_self_maps, is_proc_myself },
8299        { "stat", open_self_stat, is_proc_myself },
8300        { "auxv", open_self_auxv, is_proc_myself },
8301        { "cmdline", open_self_cmdline, is_proc_myself },
8302#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8303        { "/proc/net/route", open_net_route, is_proc },
8304#endif
8305#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8306        { "/proc/cpuinfo", open_cpuinfo, is_proc },
8307#endif
8308#if defined(TARGET_M68K)
8309        { "/proc/hardware", open_hardware, is_proc },
8310#endif
8311        { NULL, NULL, NULL }
8312    };
8313
8314    if (is_proc_myself(pathname, "exe")) {
8315        return safe_openat(dirfd, exec_path, flags, mode);
8316    }
8317
8318    for (fake_open = fakes; fake_open->filename; fake_open++) {
8319        if (fake_open->cmp(pathname, fake_open->filename)) {
8320            break;
8321        }
8322    }
8323
8324    if (fake_open->filename) {
8325        const char *tmpdir;
8326        char filename[PATH_MAX];
8327        int fd, r;
8328
8329        fd = memfd_create("qemu-open", 0);
8330        if (fd < 0) {
8331            if (errno != ENOSYS) {
8332                return fd;
8333            }
8334            /* create temporary file to map stat to */
8335            tmpdir = getenv("TMPDIR");
8336            if (!tmpdir)
8337                tmpdir = "/tmp";
8338            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8339            fd = mkstemp(filename);
8340            if (fd < 0) {
8341                return fd;
8342            }
8343            unlink(filename);
8344        }
8345
8346        if ((r = fake_open->fill(cpu_env, fd))) {
8347            int e = errno;
8348            close(fd);
8349            errno = e;
8350            return r;
8351        }
8352        lseek(fd, 0, SEEK_SET);
8353
8354        return fd;
8355    }
8356
8357    return safe_openat(dirfd, path(pathname), flags, mode);
8358}
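
/*
 * In short, guest opens of the emulated /proc entries above never reach a
 * host file: for example open("/proc/self/maps", ...) is serviced by
 * creating an anonymous memfd (or an unlinked temporary file when
 * memfd_create() is unavailable), filling it via open_self_maps() and
 * returning that descriptor, while "/proc/self/exe" is redirected to the
 * emulator's recorded exec_path.  Anything not matched falls through to
 * safe_openat() on the real path.
 */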
8359
8360#define TIMER_MAGIC 0x0caf0000
8361#define TIMER_MAGIC_MASK 0xffff0000
8362
8363/* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8364static target_timer_t get_timer_id(abi_long arg)
8365{
8366    target_timer_t timerid = arg;
8367
8368    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8369        return -TARGET_EINVAL;
8370    }
8371
8372    timerid &= 0xffff;
8373
8374    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8375        return -TARGET_EINVAL;
8376    }
8377
8378    return timerid;
8379}
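
/*
 * Timer IDs handed to the guest are built as TIMER_MAGIC | index, so for
 * example 0x0caf0002 maps back to slot 2 of g_posix_timers, while any
 * value whose top 16 bits are not TIMER_MAGIC, or whose index is out of
 * range, yields -TARGET_EINVAL.  A minimal sketch of the inverse mapping,
 * assuming that encoding (not built, for illustration only):
 */
#if 0
static target_timer_t make_timer_id(int index)
{
    /* illustrative only: pack a g_posix_timers slot into a guest-visible ID */
    return (target_timer_t)(TIMER_MAGIC | (index & 0xffff));
}
#endif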
8380
8381static int target_to_host_cpu_mask(unsigned long *host_mask,
8382                                   size_t host_size,
8383                                   abi_ulong target_addr,
8384                                   size_t target_size)
8385{
8386    unsigned target_bits = sizeof(abi_ulong) * 8;
8387    unsigned host_bits = sizeof(*host_mask) * 8;
8388    abi_ulong *target_mask;
8389    unsigned i, j;
8390
8391    assert(host_size >= target_size);
8392
8393    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8394    if (!target_mask) {
8395        return -TARGET_EFAULT;
8396    }
8397    memset(host_mask, 0, host_size);
8398
8399    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8400        unsigned bit = i * target_bits;
8401        abi_ulong val;
8402
8403        __get_user(val, &target_mask[i]);
8404        for (j = 0; j < target_bits; j++, bit++) {
8405            if (val & (1UL << j)) {
8406                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8407            }
8408        }
8409    }
8410
8411    unlock_user(target_mask, target_addr, 0);
8412    return 0;
8413}
8414
8415static int host_to_target_cpu_mask(const unsigned long *host_mask,
8416                                   size_t host_size,
8417                                   abi_ulong target_addr,
8418                                   size_t target_size)
8419{
8420    unsigned target_bits = sizeof(abi_ulong) * 8;
8421    unsigned host_bits = sizeof(*host_mask) * 8;
8422    abi_ulong *target_mask;
8423    unsigned i, j;
8424
8425    assert(host_size >= target_size);
8426
8427    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8428    if (!target_mask) {
8429        return -TARGET_EFAULT;
8430    }
8431
8432    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8433        unsigned bit = i * target_bits;
8434        abi_ulong val = 0;
8435
8436        for (j = 0; j < target_bits; j++, bit++) {
8437            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8438                val |= 1UL << j;
8439            }
8440        }
8441        __put_user(val, &target_mask[i]);
8442    }
8443
8444    unlock_user(target_mask, target_addr, target_size);
8445    return 0;
8446}
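
/*
 * Both helpers above treat the CPU affinity mask as a plain bit vector
 * laid out in words of the respective size.  For illustration, with a
 * 32-bit target (abi_ulong is 32 bits) running on a 64-bit host, target
 * word i, bit j corresponds to host bit (i * 32 + j), i.e. host word
 * (i / 2), bit (j + 32 * (i & 1)); the words themselves are byte-swapped
 * via __get_user()/__put_user() as needed.
 */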
8447
8448#ifdef TARGET_NR_getdents
8449static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8450{
8451    g_autofree void *hdirp = NULL;
8452    void *tdirp;
8453    int hlen, hoff, toff;
8454    int hreclen, treclen;
8455    off64_t prev_diroff = 0;
8456
8457    hdirp = g_try_malloc(count);
8458    if (!hdirp) {
8459        return -TARGET_ENOMEM;
8460    }
8461
8462#ifdef EMULATE_GETDENTS_WITH_GETDENTS
8463    hlen = sys_getdents(dirfd, hdirp, count);
8464#else
8465    hlen = sys_getdents64(dirfd, hdirp, count);
8466#endif
8467
8468    hlen = get_errno(hlen);
8469    if (is_error(hlen)) {
8470        return hlen;
8471    }
8472
8473    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8474    if (!tdirp) {
8475        return -TARGET_EFAULT;
8476    }
8477
8478    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8479#ifdef EMULATE_GETDENTS_WITH_GETDENTS
8480        struct linux_dirent *hde = hdirp + hoff;
8481#else
8482        struct linux_dirent64 *hde = hdirp + hoff;
8483#endif
8484        struct target_dirent *tde = tdirp + toff;
8485        int namelen;
8486        uint8_t type;
8487
8488        namelen = strlen(hde->d_name);
8489        hreclen = hde->d_reclen;
8490        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8491        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8492
8493        if (toff + treclen > count) {
8494            /*
8495             * If the host struct is smaller than the target struct, or
8496             * requires less alignment and thus packs into less space,
8497             * then the host can return more entries than we can pass
8498             * on to the guest.
8499             */
8500            if (toff == 0) {
8501                toff = -TARGET_EINVAL; /* result buffer is too small */
8502                break;
8503            }
8504            /*
8505             * Return what we have, resetting the file pointer to the
8506             * location of the first record not returned.
8507             */
8508            lseek64(dirfd, prev_diroff, SEEK_SET);
8509            break;
8510        }
8511
8512        prev_diroff = hde->d_off;
8513        tde->d_ino = tswapal(hde->d_ino);
8514        tde->d_off = tswapal(hde->d_off);
8515        tde->d_reclen = tswap16(treclen);
8516        memcpy(tde->d_name, hde->d_name, namelen + 1);
8517
8518        /*
8519         * The getdents type is in what was formerly a padding byte at the
8520         * end of the structure.
8521         */
8522#ifdef EMULATE_GETDENTS_WITH_GETDENTS
8523        type = *((uint8_t *)hde + hreclen - 1);
8524#else
8525        type = hde->d_type;
8526#endif
8527        *((uint8_t *)tde + treclen - 1) = type;
8528    }
8529
8530    unlock_user(tdirp, arg2, toff);
8531    return toff;
8532}
8533#endif /* TARGET_NR_getdents */
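
/*
 * A sketch of the repacking performed by do_getdents() above (and by
 * do_getdents64() below for the modern layout): each host record
 * (a linux_dirent64 in the common case)
 *
 *   | d_ino | d_off | d_reclen | d_type | "name\0" |
 *
 * is copied into a target old-ABI record
 *
 *   | d_ino | d_off | d_reclen | "name\0" | d_type |
 *
 * with the integer fields byte-swapped and d_reclen recomputed for the
 * target layout.  Because the two record sizes differ, a buffer large
 * enough for the host read may still not fit in the guest buffer; in that
 * case the directory offset is rewound to the first record that was not
 * returned.
 */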
8534
8535#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8536static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8537{
8538    g_autofree void *hdirp = NULL;
8539    void *tdirp;
8540    int hlen, hoff, toff;
8541    int hreclen, treclen;
8542    off64_t prev_diroff = 0;
8543
8544    hdirp = g_try_malloc(count);
8545    if (!hdirp) {
8546        return -TARGET_ENOMEM;
8547    }
8548
8549    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8550    if (is_error(hlen)) {
8551        return hlen;
8552    }
8553
8554    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8555    if (!tdirp) {
8556        return -TARGET_EFAULT;
8557    }
8558
8559    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8560        struct linux_dirent64 *hde = hdirp + hoff;
8561        struct target_dirent64 *tde = tdirp + toff;
8562        int namelen;
8563
8564        namelen = strlen(hde->d_name) + 1;
8565        hreclen = hde->d_reclen;
8566        treclen = offsetof(struct target_dirent64, d_name) + namelen;
8567        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8568
8569        if (toff + treclen > count) {
8570            /*
8571             * If the host struct is smaller than the target struct, or
8572             * requires less alignment and thus packs into less space,
8573             * then the host can return more entries than we can pass
8574             * on to the guest.
8575             */
8576            if (toff == 0) {
8577                toff = -TARGET_EINVAL; /* result buffer is too small */
8578                break;
8579            }
8580            /*
8581             * Return what we have, resetting the file pointer to the
8582             * location of the first record not returned.
8583             */
8584            lseek64(dirfd, prev_diroff, SEEK_SET);
8585            break;
8586        }
8587
8588        prev_diroff = hde->d_off;
8589        tde->d_ino = tswap64(hde->d_ino);
8590        tde->d_off = tswap64(hde->d_off);
8591        tde->d_reclen = tswap16(treclen);
8592        tde->d_type = hde->d_type;
8593        memcpy(tde->d_name, hde->d_name, namelen);
8594    }
8595
8596    unlock_user(tdirp, arg2, toff);
8597    return toff;
8598}
8599#endif /* TARGET_NR_getdents64 */
8600
8601#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8602_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8603#endif
8604
8605/* This is an internal helper for do_syscall so that it is easier
8606 * to have a single return point, allowing actions such as logging
8607 * of syscall results to be performed in one place.
8608 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8609 */
8610static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8611                            abi_long arg2, abi_long arg3, abi_long arg4,
8612                            abi_long arg5, abi_long arg6, abi_long arg7,
8613                            abi_long arg8)
8614{
8615    CPUState *cpu = env_cpu(cpu_env);
8616    abi_long ret;
8617#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8618    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8619    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8620    || defined(TARGET_NR_statx)
8621    struct stat st;
8622#endif
8623#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8624    || defined(TARGET_NR_fstatfs)
8625    struct statfs stfs;
8626#endif
8627    void *p;
8628
8629    switch(num) {
8630    case TARGET_NR_exit:
8631        /* In old applications this may be used to implement _exit(2).
8632           However, in threaded applications it is used for thread termination,
8633           and _exit_group is used for application termination.
8634           Do thread termination if we have more than one thread.  */
8635
8636        if (block_signals()) {
8637            return -QEMU_ERESTARTSYS;
8638        }
8639
8640        pthread_mutex_lock(&clone_lock);
8641
8642        if (CPU_NEXT(first_cpu)) {
8643            TaskState *ts = cpu->opaque;
8644
8645            object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8646            object_unref(OBJECT(cpu));
8647            /*
8648             * At this point the CPU should be unrealized and removed
8649             * from cpu lists. We can clean-up the rest of the thread
8650             * data without the lock held.
8651             */
8652
8653            pthread_mutex_unlock(&clone_lock);
8654
8655            if (ts->child_tidptr) {
8656                put_user_u32(0, ts->child_tidptr);
8657                do_sys_futex(g2h(cpu, ts->child_tidptr),
8658                             FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8659            }
8660            thread_cpu = NULL;
8661            g_free(ts);
8662            rcu_unregister_thread();
8663            pthread_exit(NULL);
8664        }
8665
8666        pthread_mutex_unlock(&clone_lock);
8667        preexit_cleanup(cpu_env, arg1);
8668        _exit(arg1);
8669        return 0; /* avoid warning */
8670    case TARGET_NR_read:
8671        if (arg2 == 0 && arg3 == 0) {
8672            return get_errno(safe_read(arg1, 0, 0));
8673        } else {
8674            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8675                return -TARGET_EFAULT;
8676            ret = get_errno(safe_read(arg1, p, arg3));
8677            if (ret >= 0 &&
8678                fd_trans_host_to_target_data(arg1)) {
8679                ret = fd_trans_host_to_target_data(arg1)(p, ret);
8680            }
8681            unlock_user(p, arg2, ret);
8682        }
8683        return ret;
8684    case TARGET_NR_write:
8685        if (arg2 == 0 && arg3 == 0) {
8686            return get_errno(safe_write(arg1, 0, 0));
8687        }
8688        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8689            return -TARGET_EFAULT;
8690        if (fd_trans_target_to_host_data(arg1)) {
8691            void *copy = g_malloc(arg3);
8692            memcpy(copy, p, arg3);
8693            ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8694            if (ret >= 0) {
8695                ret = get_errno(safe_write(arg1, copy, ret));
8696            }
8697            g_free(copy);
8698        } else {
8699            ret = get_errno(safe_write(arg1, p, arg3));
8700        }
8701        unlock_user(p, arg2, 0);
8702        return ret;
8703
8704#ifdef TARGET_NR_open
8705    case TARGET_NR_open:
8706        if (!(p = lock_user_string(arg1)))
8707            return -TARGET_EFAULT;
8708        ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8709                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
8710                                  arg3));
8711        fd_trans_unregister(ret);
8712        unlock_user(p, arg1, 0);
8713        return ret;
8714#endif
8715    case TARGET_NR_openat:
8716        if (!(p = lock_user_string(arg2)))
8717            return -TARGET_EFAULT;
8718        ret = get_errno(do_openat(cpu_env, arg1, p,
8719                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
8720                                  arg4));
8721        fd_trans_unregister(ret);
8722        unlock_user(p, arg2, 0);
8723        return ret;
8724#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8725    case TARGET_NR_name_to_handle_at:
8726        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8727        return ret;
8728#endif
8729#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8730    case TARGET_NR_open_by_handle_at:
8731        ret = do_open_by_handle_at(arg1, arg2, arg3);
8732        fd_trans_unregister(ret);
8733        return ret;
8734#endif
8735#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8736    case TARGET_NR_pidfd_open:
8737        return get_errno(pidfd_open(arg1, arg2));
8738#endif
8739#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8740    case TARGET_NR_pidfd_send_signal:
8741        {
8742            siginfo_t uinfo, *puinfo;
8743
8744            if (arg3) {
8745                p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8746                if (!p) {
8747                    return -TARGET_EFAULT;
8748                }
8749                target_to_host_siginfo(&uinfo, p);
8750                unlock_user(p, arg3, 0);
8751                puinfo = &uinfo;
8752            } else {
8753                puinfo = NULL;
8754            }
8755            ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8756                                              puinfo, arg4));
8757        }
8758        return ret;
8759#endif
8760#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8761    case TARGET_NR_pidfd_getfd:
8762        return get_errno(pidfd_getfd(arg1, arg2, arg3));
8763#endif
8764    case TARGET_NR_close:
8765        fd_trans_unregister(arg1);
8766        return get_errno(close(arg1));
8767#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8768    case TARGET_NR_close_range:
8769        ret = get_errno(sys_close_range(arg1, arg2, arg3));
8770        if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8771            abi_long fd, maxfd;
8772            maxfd = MIN(arg2, target_fd_max);
8773            for (fd = arg1; fd < maxfd; fd++) {
8774                fd_trans_unregister(fd);
8775            }
8776        }
8777        return ret;
8778#endif
8779
8780    case TARGET_NR_brk:
8781        return do_brk(arg1);
8782#ifdef TARGET_NR_fork
8783    case TARGET_NR_fork:
8784        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8785#endif
8786#ifdef TARGET_NR_waitpid
8787    case TARGET_NR_waitpid:
8788        {
8789            int status;
8790            ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8791            if (!is_error(ret) && arg2 && ret
8792                && put_user_s32(host_to_target_waitstatus(status), arg2))
8793                return -TARGET_EFAULT;
8794        }
8795        return ret;
8796#endif
8797#ifdef TARGET_NR_waitid
8798    case TARGET_NR_waitid:
8799        {
8800            siginfo_t info;
8801            info.si_pid = 0;
8802            ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8803            if (!is_error(ret) && arg3 && info.si_pid != 0) {
8804                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8805                    return -TARGET_EFAULT;
8806                host_to_target_siginfo(p, &info);
8807                unlock_user(p, arg3, sizeof(target_siginfo_t));
8808            }
8809        }
8810        return ret;
8811#endif
8812#ifdef TARGET_NR_creat /* not on alpha */
8813    case TARGET_NR_creat:
8814        if (!(p = lock_user_string(arg1)))
8815            return -TARGET_EFAULT;
8816        ret = get_errno(creat(p, arg2));
8817        fd_trans_unregister(ret);
8818        unlock_user(p, arg1, 0);
8819        return ret;
8820#endif
8821#ifdef TARGET_NR_link
8822    case TARGET_NR_link:
8823        {
8824            void * p2;
8825            p = lock_user_string(arg1);
8826            p2 = lock_user_string(arg2);
8827            if (!p || !p2)
8828                ret = -TARGET_EFAULT;
8829            else
8830                ret = get_errno(link(p, p2));
8831            unlock_user(p2, arg2, 0);
8832            unlock_user(p, arg1, 0);
8833        }
8834        return ret;
8835#endif
8836#if defined(TARGET_NR_linkat)
8837    case TARGET_NR_linkat:
8838        {
8839            void * p2 = NULL;
8840            if (!arg2 || !arg4)
8841                return -TARGET_EFAULT;
8842            p  = lock_user_string(arg2);
8843            p2 = lock_user_string(arg4);
8844            if (!p || !p2)
8845                ret = -TARGET_EFAULT;
8846            else
8847                ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8848            unlock_user(p, arg2, 0);
8849            unlock_user(p2, arg4, 0);
8850        }
8851        return ret;
8852#endif
8853#ifdef TARGET_NR_unlink
8854    case TARGET_NR_unlink:
8855        if (!(p = lock_user_string(arg1)))
8856            return -TARGET_EFAULT;
8857        ret = get_errno(unlink(p));
8858        unlock_user(p, arg1, 0);
8859        return ret;
8860#endif
8861#if defined(TARGET_NR_unlinkat)
8862    case TARGET_NR_unlinkat:
8863        if (!(p = lock_user_string(arg2)))
8864            return -TARGET_EFAULT;
8865        ret = get_errno(unlinkat(arg1, p, arg3));
8866        unlock_user(p, arg2, 0);
8867        return ret;
8868#endif
8869    case TARGET_NR_execve:
8870        {
8871            char **argp, **envp;
8872            int argc, envc;
8873            abi_ulong gp;
8874            abi_ulong guest_argp;
8875            abi_ulong guest_envp;
8876            abi_ulong addr;
8877            char **q;
8878
8879            argc = 0;
8880            guest_argp = arg2;
8881            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8882                if (get_user_ual(addr, gp))
8883                    return -TARGET_EFAULT;
8884                if (!addr)
8885                    break;
8886                argc++;
8887            }
8888            envc = 0;
8889            guest_envp = arg3;
8890            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8891                if (get_user_ual(addr, gp))
8892                    return -TARGET_EFAULT;
8893                if (!addr)
8894                    break;
8895                envc++;
8896            }
8897
8898            argp = g_new0(char *, argc + 1);
8899            envp = g_new0(char *, envc + 1);
8900
8901            for (gp = guest_argp, q = argp; gp;
8902                  gp += sizeof(abi_ulong), q++) {
8903                if (get_user_ual(addr, gp))
8904                    goto execve_efault;
8905                if (!addr)
8906                    break;
8907                if (!(*q = lock_user_string(addr)))
8908                    goto execve_efault;
8909            }
8910            *q = NULL;
8911
8912            for (gp = guest_envp, q = envp; gp;
8913                  gp += sizeof(abi_ulong), q++) {
8914                if (get_user_ual(addr, gp))
8915                    goto execve_efault;
8916                if (!addr)
8917                    break;
8918                if (!(*q = lock_user_string(addr)))
8919                    goto execve_efault;
8920            }
8921            *q = NULL;
8922
8923            if (!(p = lock_user_string(arg1)))
8924                goto execve_efault;
8925            /* Although execve() is not an interruptible syscall it is
8926             * a special case where we must use the safe_syscall wrapper:
8927             * if we allow a signal to happen before we make the host
8928             * syscall then we will 'lose' it, because at the point of
8929             * execve the process leaves QEMU's control. So we use the
8930             * safe syscall wrapper to ensure that we either take the
8931             * signal as a guest signal, or else it does not happen
8932             * before the execve completes and makes it the other
8933             * program's problem.
8934             */
8935            if (is_proc_myself(p, "exe")) {
8936                ret = get_errno(safe_execve(exec_path, argp, envp));
8937            } else {
8938                ret = get_errno(safe_execve(p, argp, envp));
8939            }
8940            unlock_user(p, arg1, 0);
8941
8942            goto execve_end;
8943
8944        execve_efault:
8945            ret = -TARGET_EFAULT;
8946
8947        execve_end:
8948            for (gp = guest_argp, q = argp; *q;
8949                  gp += sizeof(abi_ulong), q++) {
8950                if (get_user_ual(addr, gp)
8951                    || !addr)
8952                    break;
8953                unlock_user(*q, addr, 0);
8954            }
8955            for (gp = guest_envp, q = envp; *q;
8956                  gp += sizeof(abi_ulong), q++) {
8957                if (get_user_ual(addr, gp)
8958                    || !addr)
8959                    break;
8960                unlock_user(*q, addr, 0);
8961            }
8962
8963            g_free(argp);
8964            g_free(envp);
8965        }
8966        return ret;
8967    case TARGET_NR_chdir:
8968        if (!(p = lock_user_string(arg1)))
8969            return -TARGET_EFAULT;
8970        ret = get_errno(chdir(p));
8971        unlock_user(p, arg1, 0);
8972        return ret;
8973#ifdef TARGET_NR_time
8974    case TARGET_NR_time:
8975        {
8976            time_t host_time;
8977            ret = get_errno(time(&host_time));
8978            if (!is_error(ret)
8979                && arg1
8980                && put_user_sal(host_time, arg1))
8981                return -TARGET_EFAULT;
8982        }
8983        return ret;
8984#endif
8985#ifdef TARGET_NR_mknod
8986    case TARGET_NR_mknod:
8987        if (!(p = lock_user_string(arg1)))
8988            return -TARGET_EFAULT;
8989        ret = get_errno(mknod(p, arg2, arg3));
8990        unlock_user(p, arg1, 0);
8991        return ret;
8992#endif
8993#if defined(TARGET_NR_mknodat)
8994    case TARGET_NR_mknodat:
8995        if (!(p = lock_user_string(arg2)))
8996            return -TARGET_EFAULT;
8997        ret = get_errno(mknodat(arg1, p, arg3, arg4));
8998        unlock_user(p, arg2, 0);
8999        return ret;
9000#endif
9001#ifdef TARGET_NR_chmod
9002    case TARGET_NR_chmod:
9003        if (!(p = lock_user_string(arg1)))
9004            return -TARGET_EFAULT;
9005        ret = get_errno(chmod(p, arg2));
9006        unlock_user(p, arg1, 0);
9007        return ret;
9008#endif
9009#ifdef TARGET_NR_lseek
9010    case TARGET_NR_lseek:
9011        return get_errno(lseek(arg1, arg2, arg3));
9012#endif
9013#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9014    /* Alpha specific */
9015    case TARGET_NR_getxpid:
9016        cpu_env->ir[IR_A4] = getppid();
9017        return get_errno(getpid());
9018#endif
9019#ifdef TARGET_NR_getpid
9020    case TARGET_NR_getpid:
9021        return get_errno(getpid());
9022#endif
9023    case TARGET_NR_mount:
9024        {
9025            /* need to look at the data field */
9026            void *p2, *p3;
9027
9028            if (arg1) {
9029                p = lock_user_string(arg1);
9030                if (!p) {
9031                    return -TARGET_EFAULT;
9032                }
9033            } else {
9034                p = NULL;
9035            }
9036
9037            p2 = lock_user_string(arg2);
9038            if (!p2) {
9039                if (arg1) {
9040                    unlock_user(p, arg1, 0);
9041                }
9042                return -TARGET_EFAULT;
9043            }
9044
9045            if (arg3) {
9046                p3 = lock_user_string(arg3);
9047                if (!p3) {
9048                    if (arg1) {
9049                        unlock_user(p, arg1, 0);
9050                    }
9051                    unlock_user(p2, arg2, 0);
9052                    return -TARGET_EFAULT;
9053                }
9054            } else {
9055                p3 = NULL;
9056            }
9057
9058            /* FIXME - arg5 should be locked, but it isn't clear how to
9059             * do that since it's not guaranteed to be a NULL-terminated
9060             * string.
9061             */
9062            if (!arg5) {
9063                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9064            } else {
9065                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9066            }
9067            ret = get_errno(ret);
9068
9069            if (arg1) {
9070                unlock_user(p, arg1, 0);
9071            }
9072            unlock_user(p2, arg2, 0);
9073            if (arg3) {
9074                unlock_user(p3, arg3, 0);
9075            }
9076        }
9077        return ret;
9078#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9079#if defined(TARGET_NR_umount)
9080    case TARGET_NR_umount:
9081#endif
9082#if defined(TARGET_NR_oldumount)
9083    case TARGET_NR_oldumount:
9084#endif
9085        if (!(p = lock_user_string(arg1)))
9086            return -TARGET_EFAULT;
9087        ret = get_errno(umount(p));
9088        unlock_user(p, arg1, 0);
9089        return ret;
9090#endif
9091#ifdef TARGET_NR_stime /* not on alpha */
9092    case TARGET_NR_stime:
9093        {
9094            struct timespec ts;
9095            ts.tv_nsec = 0;
9096            if (get_user_sal(ts.tv_sec, arg1)) {
9097                return -TARGET_EFAULT;
9098            }
9099            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9100        }
9101#endif
9102#ifdef TARGET_NR_alarm /* not on alpha */
9103    case TARGET_NR_alarm:
9104        return alarm(arg1);
9105#endif
9106#ifdef TARGET_NR_pause /* not on alpha */
9107    case TARGET_NR_pause:
9108        if (!block_signals()) {
9109            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9110        }
9111        return -TARGET_EINTR;
9112#endif
9113#ifdef TARGET_NR_utime
9114    case TARGET_NR_utime:
9115        {
9116            struct utimbuf tbuf, *host_tbuf;
9117            struct target_utimbuf *target_tbuf;
9118            if (arg2) {
9119                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9120                    return -TARGET_EFAULT;
9121                tbuf.actime = tswapal(target_tbuf->actime);
9122                tbuf.modtime = tswapal(target_tbuf->modtime);
9123                unlock_user_struct(target_tbuf, arg2, 0);
9124                host_tbuf = &tbuf;
9125            } else {
9126                host_tbuf = NULL;
9127            }
9128            if (!(p = lock_user_string(arg1)))
9129                return -TARGET_EFAULT;
9130            ret = get_errno(utime(p, host_tbuf));
9131            unlock_user(p, arg1, 0);
9132        }
9133        return ret;
9134#endif
9135#ifdef TARGET_NR_utimes
9136    case TARGET_NR_utimes:
9137        {
9138            struct timeval *tvp, tv[2];
9139            if (arg2) {
9140                if (copy_from_user_timeval(&tv[0], arg2)
9141                    || copy_from_user_timeval(&tv[1],
9142                                              arg2 + sizeof(struct target_timeval)))
9143                    return -TARGET_EFAULT;
9144                tvp = tv;
9145            } else {
9146                tvp = NULL;
9147            }
9148            if (!(p = lock_user_string(arg1)))
9149                return -TARGET_EFAULT;
9150            ret = get_errno(utimes(p, tvp));
9151            unlock_user(p, arg1, 0);
9152        }
9153        return ret;
9154#endif
9155#if defined(TARGET_NR_futimesat)
9156    case TARGET_NR_futimesat:
9157        {
9158            struct timeval *tvp, tv[2];
9159            if (arg3) {
9160                if (copy_from_user_timeval(&tv[0], arg3)
9161                    || copy_from_user_timeval(&tv[1],
9162                                              arg3 + sizeof(struct target_timeval)))
9163                    return -TARGET_EFAULT;
9164                tvp = tv;
9165            } else {
9166                tvp = NULL;
9167            }
9168            if (!(p = lock_user_string(arg2))) {
9169                return -TARGET_EFAULT;
9170            }
9171            ret = get_errno(futimesat(arg1, path(p), tvp));
9172            unlock_user(p, arg2, 0);
9173        }
9174        return ret;
9175#endif
9176#ifdef TARGET_NR_access
9177    case TARGET_NR_access:
9178        if (!(p = lock_user_string(arg1))) {
9179            return -TARGET_EFAULT;
9180        }
9181        ret = get_errno(access(path(p), arg2));
9182        unlock_user(p, arg1, 0);
9183        return ret;
9184#endif
9185#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9186    case TARGET_NR_faccessat:
9187        if (!(p = lock_user_string(arg2))) {
9188            return -TARGET_EFAULT;
9189        }
9190        ret = get_errno(faccessat(arg1, p, arg3, 0));
9191        unlock_user(p, arg2, 0);
9192        return ret;
9193#endif
9194#if defined(TARGET_NR_faccessat2)
9195    case TARGET_NR_faccessat2:
9196        if (!(p = lock_user_string(arg2))) {
9197            return -TARGET_EFAULT;
9198        }
9199        ret = get_errno(faccessat(arg1, p, arg3, arg4));
9200        unlock_user(p, arg2, 0);
9201        return ret;
9202#endif
9203#ifdef TARGET_NR_nice /* not on alpha */
9204    case TARGET_NR_nice:
9205        return get_errno(nice(arg1));
9206#endif
9207    case TARGET_NR_sync:
9208        sync();
9209        return 0;
9210#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9211    case TARGET_NR_syncfs:
9212        return get_errno(syncfs(arg1));
9213#endif
9214    case TARGET_NR_kill:
9215        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9216#ifdef TARGET_NR_rename
9217    case TARGET_NR_rename:
9218        {
9219            void *p2;
9220            p = lock_user_string(arg1);
9221            p2 = lock_user_string(arg2);
9222            if (!p || !p2)
9223                ret = -TARGET_EFAULT;
9224            else
9225                ret = get_errno(rename(p, p2));
9226            unlock_user(p2, arg2, 0);
9227            unlock_user(p, arg1, 0);
9228        }
9229        return ret;
9230#endif
9231#if defined(TARGET_NR_renameat)
9232    case TARGET_NR_renameat:
9233        {
9234            void *p2;
9235            p  = lock_user_string(arg2);
9236            p2 = lock_user_string(arg4);
9237            if (!p || !p2)
9238                ret = -TARGET_EFAULT;
9239            else
9240                ret = get_errno(renameat(arg1, p, arg3, p2));
9241            unlock_user(p2, arg4, 0);
9242            unlock_user(p, arg2, 0);
9243        }
9244        return ret;
9245#endif
9246#if defined(TARGET_NR_renameat2)
9247    case TARGET_NR_renameat2:
9248        {
9249            void *p2;
9250            p  = lock_user_string(arg2);
9251            p2 = lock_user_string(arg4);
9252            if (!p || !p2) {
9253                ret = -TARGET_EFAULT;
9254            } else {
9255                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9256            }
9257            unlock_user(p2, arg4, 0);
9258            unlock_user(p, arg2, 0);
9259        }
9260        return ret;
9261#endif
9262#ifdef TARGET_NR_mkdir
9263    case TARGET_NR_mkdir:
9264        if (!(p = lock_user_string(arg1)))
9265            return -TARGET_EFAULT;
9266        ret = get_errno(mkdir(p, arg2));
9267        unlock_user(p, arg1, 0);
9268        return ret;
9269#endif
9270#if defined(TARGET_NR_mkdirat)
9271    case TARGET_NR_mkdirat:
9272        if (!(p = lock_user_string(arg2)))
9273            return -TARGET_EFAULT;
9274        ret = get_errno(mkdirat(arg1, p, arg3));
9275        unlock_user(p, arg2, 0);
9276        return ret;
9277#endif
9278#ifdef TARGET_NR_rmdir
9279    case TARGET_NR_rmdir:
9280        if (!(p = lock_user_string(arg1)))
9281            return -TARGET_EFAULT;
9282        ret = get_errno(rmdir(p));
9283        unlock_user(p, arg1, 0);
9284        return ret;
9285#endif
9286    case TARGET_NR_dup:
9287        ret = get_errno(dup(arg1));
9288        if (ret >= 0) {
9289            fd_trans_dup(arg1, ret);
9290        }
9291        return ret;
9292#ifdef TARGET_NR_pipe
9293    case TARGET_NR_pipe:
9294        return do_pipe(cpu_env, arg1, 0, 0);
9295#endif
9296#ifdef TARGET_NR_pipe2
9297    case TARGET_NR_pipe2:
9298        return do_pipe(cpu_env, arg1,
9299                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9300#endif
9301    case TARGET_NR_times:
9302        {
9303            struct target_tms *tmsp;
9304            struct tms tms;
9305            ret = get_errno(times(&tms));
9306            if (arg1) {
9307                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9308                if (!tmsp)
9309                    return -TARGET_EFAULT;
9310                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9311                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9312                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9313                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9314            }
9315            if (!is_error(ret))
9316                ret = host_to_target_clock_t(ret);
9317        }
9318        return ret;
9319    case TARGET_NR_acct:
9320        if (arg1 == 0) {
9321            ret = get_errno(acct(NULL));
9322        } else {
9323            if (!(p = lock_user_string(arg1))) {
9324                return -TARGET_EFAULT;
9325            }
9326            ret = get_errno(acct(path(p)));
9327            unlock_user(p, arg1, 0);
9328        }
9329        return ret;
9330#ifdef TARGET_NR_umount2
9331    case TARGET_NR_umount2:
9332        if (!(p = lock_user_string(arg1)))
9333            return -TARGET_EFAULT;
9334        ret = get_errno(umount2(p, arg2));
9335        unlock_user(p, arg1, 0);
9336        return ret;
9337#endif
9338    case TARGET_NR_ioctl:
9339        return do_ioctl(arg1, arg2, arg3);
9340#ifdef TARGET_NR_fcntl
9341    case TARGET_NR_fcntl:
9342        return do_fcntl(arg1, arg2, arg3);
9343#endif
9344    case TARGET_NR_setpgid:
9345        return get_errno(setpgid(arg1, arg2));
9346    case TARGET_NR_umask:
9347        return get_errno(umask(arg1));
9348    case TARGET_NR_chroot:
9349        if (!(p = lock_user_string(arg1)))
9350            return -TARGET_EFAULT;
9351        ret = get_errno(chroot(p));
9352        unlock_user(p, arg1, 0);
9353        return ret;
9354#ifdef TARGET_NR_dup2
9355    case TARGET_NR_dup2:
9356        ret = get_errno(dup2(arg1, arg2));
9357        if (ret >= 0) {
9358            fd_trans_dup(arg1, arg2);
9359        }
9360        return ret;
9361#endif
9362#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9363    case TARGET_NR_dup3:
9364    {
9365        int host_flags;
9366
9367        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9368            return -TARGET_EINVAL;
9369        }
9370        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9371        ret = get_errno(dup3(arg1, arg2, host_flags));
9372        if (ret >= 0) {
9373            fd_trans_dup(arg1, arg2);
9374        }
9375        return ret;
9376    }
9377#endif
9378#ifdef TARGET_NR_getppid /* not on alpha */
9379    case TARGET_NR_getppid:
9380        return get_errno(getppid());
9381#endif
9382#ifdef TARGET_NR_getpgrp
9383    case TARGET_NR_getpgrp:
9384        return get_errno(getpgrp());
9385#endif
9386    case TARGET_NR_setsid:
9387        return get_errno(setsid());
9388#ifdef TARGET_NR_sigaction
9389    case TARGET_NR_sigaction:
9390        {
9391#if defined(TARGET_MIPS)
9392            struct target_sigaction act, oact, *pact, *old_act;
9393
9394            if (arg2) {
9395                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9396                    return -TARGET_EFAULT;
9397                act._sa_handler = old_act->_sa_handler;
9398                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9399                act.sa_flags = old_act->sa_flags;
9400                unlock_user_struct(old_act, arg2, 0);
9401                pact = &act;
9402            } else {
9403                pact = NULL;
9404            }
9405
9406            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9407
9408            if (!is_error(ret) && arg3) {
9409                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9410                    return -TARGET_EFAULT;
9411                old_act->_sa_handler = oact._sa_handler;
9412                old_act->sa_flags = oact.sa_flags;
9413                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9414                old_act->sa_mask.sig[1] = 0;
9415                old_act->sa_mask.sig[2] = 0;
9416                old_act->sa_mask.sig[3] = 0;
9417                unlock_user_struct(old_act, arg3, 1);
9418            }
9419#else
9420            struct target_old_sigaction *old_act;
9421            struct target_sigaction act, oact, *pact;
9422            if (arg2) {
9423                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9424                    return -TARGET_EFAULT;
9425                act._sa_handler = old_act->_sa_handler;
9426                target_siginitset(&act.sa_mask, old_act->sa_mask);
9427                act.sa_flags = old_act->sa_flags;
9428#ifdef TARGET_ARCH_HAS_SA_RESTORER
9429                act.sa_restorer = old_act->sa_restorer;
9430#endif
9431                unlock_user_struct(old_act, arg2, 0);
9432                pact = &act;
9433            } else {
9434                pact = NULL;
9435            }
9436            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9437            if (!is_error(ret) && arg3) {
9438                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9439                    return -TARGET_EFAULT;
9440                old_act->_sa_handler = oact._sa_handler;
9441                old_act->sa_mask = oact.sa_mask.sig[0];
9442                old_act->sa_flags = oact.sa_flags;
9443#ifdef TARGET_ARCH_HAS_SA_RESTORER
9444                old_act->sa_restorer = oact.sa_restorer;
9445#endif
9446                unlock_user_struct(old_act, arg3, 1);
9447            }
9448#endif
9449        }
9450        return ret;
9451#endif
9452    case TARGET_NR_rt_sigaction:
9453        {
9454            /*
9455             * For Alpha and SPARC this is a 5 argument syscall, with
9456             * a 'restorer' parameter which must be copied into the
9457             * sa_restorer field of the sigaction struct.
9458             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9459             * and arg5 is the sigsetsize.
9460             */
9461#if defined(TARGET_ALPHA)
9462            target_ulong sigsetsize = arg4;
9463            target_ulong restorer = arg5;
9464#elif defined(TARGET_SPARC)
9465            target_ulong restorer = arg4;
9466            target_ulong sigsetsize = arg5;
9467#else
9468            target_ulong sigsetsize = arg4;
9469            target_ulong restorer = 0;
9470#endif
9471            struct target_sigaction *act = NULL;
9472            struct target_sigaction *oact = NULL;
9473
9474            if (sigsetsize != sizeof(target_sigset_t)) {
9475                return -TARGET_EINVAL;
9476            }
9477            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9478                return -TARGET_EFAULT;
9479            }
9480            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9481                ret = -TARGET_EFAULT;
9482            } else {
9483                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9484                if (oact) {
9485                    unlock_user_struct(oact, arg3, 1);
9486                }
9487            }
9488            if (act) {
9489                unlock_user_struct(act, arg2, 0);
9490            }
9491        }
9492        return ret;
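        /*
         * Illustration: on most targets a guest sigaction already carries
         * sa_restorer inside the struct (TARGET_ARCH_HAS_SA_RESTORER), but
         * Alpha and SPARC pass the restorer trampoline as an extra register
         * argument to rt_sigaction, which is why it is forwarded to
         * do_sigaction() separately above instead of being read out of the
         * locked act structure.
         */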
9493#ifdef TARGET_NR_sgetmask /* not on alpha */
9494    case TARGET_NR_sgetmask:
9495        {
9496            sigset_t cur_set;
9497            abi_ulong target_set;
9498            ret = do_sigprocmask(0, NULL, &cur_set);
9499            if (!ret) {
9500                host_to_target_old_sigset(&target_set, &cur_set);
9501                ret = target_set;
9502            }
9503        }
9504        return ret;
9505#endif
9506#ifdef TARGET_NR_ssetmask /* not on alpha */
9507    case TARGET_NR_ssetmask:
9508        {
9509            sigset_t set, oset;
9510            abi_ulong target_set = arg1;
9511            target_to_host_old_sigset(&set, &target_set);
9512            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9513            if (!ret) {
9514                host_to_target_old_sigset(&target_set, &oset);
9515                ret = target_set;
9516            }
9517        }
9518        return ret;
9519#endif
9520#ifdef TARGET_NR_sigprocmask
9521    case TARGET_NR_sigprocmask:
9522        {
9523#if defined(TARGET_ALPHA)
9524            sigset_t set, oldset;
9525            abi_ulong mask;
9526            int how;
9527
9528            switch (arg1) {
9529            case TARGET_SIG_BLOCK:
9530                how = SIG_BLOCK;
9531                break;
9532            case TARGET_SIG_UNBLOCK:
9533                how = SIG_UNBLOCK;
9534                break;
9535            case TARGET_SIG_SETMASK:
9536                how = SIG_SETMASK;
9537                break;
9538            default:
9539                return -TARGET_EINVAL;
9540            }
9541            mask = arg2;
9542            target_to_host_old_sigset(&set, &mask);
9543
9544            ret = do_sigprocmask(how, &set, &oldset);
9545            if (!is_error(ret)) {
9546                host_to_target_old_sigset(&mask, &oldset);
9547                ret = mask;
9548                cpu_env->ir[IR_V0] = 0; /* force no error */
9549            }
9550#else
9551            sigset_t set, oldset, *set_ptr;
9552            int how;
9553
9554            if (arg2) {
9555                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9556                if (!p) {
9557                    return -TARGET_EFAULT;
9558                }
9559                target_to_host_old_sigset(&set, p);
9560                unlock_user(p, arg2, 0);
9561                set_ptr = &set;
9562                switch (arg1) {
9563                case TARGET_SIG_BLOCK:
9564                    how = SIG_BLOCK;
9565                    break;
9566                case TARGET_SIG_UNBLOCK:
9567                    how = SIG_UNBLOCK;
9568                    break;
9569                case TARGET_SIG_SETMASK:
9570                    how = SIG_SETMASK;
9571                    break;
9572                default:
9573                    return -TARGET_EINVAL;
9574                }
9575            } else {
9576                how = 0;
9577                set_ptr = NULL;
9578            }
9579            ret = do_sigprocmask(how, set_ptr, &oldset);
9580            if (!is_error(ret) && arg3) {
9581                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9582                    return -TARGET_EFAULT;
9583                host_to_target_old_sigset(p, &oldset);
9584                unlock_user(p, arg3, sizeof(target_sigset_t));
9585            }
9586#endif
9587        }
9588        return ret;
9589#endif
9590    case TARGET_NR_rt_sigprocmask:
9591        {
9592            int how = arg1;
9593            sigset_t set, oldset, *set_ptr;
9594
9595            if (arg4 != sizeof(target_sigset_t)) {
9596                return -TARGET_EINVAL;
9597            }
9598
9599            if (arg2) {
9600                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9601                if (!p) {
9602                    return -TARGET_EFAULT;
9603                }
9604                target_to_host_sigset(&set, p);
9605                unlock_user(p, arg2, 0);
9606                set_ptr = &set;
9607                switch(how) {
9608                case TARGET_SIG_BLOCK:
9609                    how = SIG_BLOCK;
9610                    break;
9611                case TARGET_SIG_UNBLOCK:
9612                    how = SIG_UNBLOCK;
9613                    break;
9614                case TARGET_SIG_SETMASK:
9615                    how = SIG_SETMASK;
9616                    break;
9617                default:
9618                    return -TARGET_EINVAL;
9619                }
9620            } else {
9621                how = 0;
9622                set_ptr = NULL;
9623            }
9624            ret = do_sigprocmask(how, set_ptr, &oldset);
9625            if (!is_error(ret) && arg3) {
9626                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9627                    return -TARGET_EFAULT;
9628                host_to_target_sigset(p, &oldset);
9629                unlock_user(p, arg3, sizeof(target_sigset_t));
9630            }
9631        }
9632        return ret;
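        /*
         * Worked example (illustration): a guest libc call
         * sigprocmask(SIG_BLOCK, &set, &old) normally lands here as
         * rt_sigprocmask(TARGET_SIG_BLOCK, set, old, sizeof(sigset)); any
         * other sigsetsize is rejected with EINVAL, as the kernel does.
         * target_to_host_sigset() byte-swaps the mask and renumbers signals
         * into host numbering, and the previous mask is converted back and
         * written out only when the old-set pointer is non-NULL.
         */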
9633#ifdef TARGET_NR_sigpending
9634    case TARGET_NR_sigpending:
9635        {
9636            sigset_t set;
9637            ret = get_errno(sigpending(&set));
9638            if (!is_error(ret)) {
9639                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9640                    return -TARGET_EFAULT;
9641                host_to_target_old_sigset(p, &set);
9642                unlock_user(p, arg1, sizeof(target_sigset_t));
9643            }
9644        }
9645        return ret;
9646#endif
9647    case TARGET_NR_rt_sigpending:
9648        {
9649            sigset_t set;
9650
9651            /* Yes, this check is >, not != like most. We follow the kernel's
9652             * logic and it does it like this because it implements
9653             * NR_sigpending through the same code path, and in that case
9654             * the old_sigset_t is smaller in size.
9655             */
9656            if (arg2 > sizeof(target_sigset_t)) {
9657                return -TARGET_EINVAL;
9658            }
9659
9660            ret = get_errno(sigpending(&set));
9661            if (!is_error(ret)) {
9662                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9663                    return -TARGET_EFAULT;
9664                host_to_target_sigset(p, &set);
9665                unlock_user(p, arg1, sizeof(target_sigset_t));
9666            }
9667        }
9668        return ret;
9669#ifdef TARGET_NR_sigsuspend
9670    case TARGET_NR_sigsuspend:
9671        {
9672            sigset_t *set;
9673
9674#if defined(TARGET_ALPHA)
9675            TaskState *ts = cpu->opaque;
9676            /* target_to_host_old_sigset will bswap back */
9677            abi_ulong mask = tswapal(arg1);
9678            set = &ts->sigsuspend_mask;
9679            target_to_host_old_sigset(set, &mask);
9680#else
9681            ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9682            if (ret != 0) {
9683                return ret;
9684            }
9685#endif
9686            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9687            finish_sigsuspend_mask(ret);
9688        }
9689        return ret;
9690#endif
9691    case TARGET_NR_rt_sigsuspend:
9692        {
9693            sigset_t *set;
9694
9695            ret = process_sigsuspend_mask(&set, arg1, arg2);
9696            if (ret != 0) {
9697                return ret;
9698            }
9699            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9700            finish_sigsuspend_mask(ret);
9701        }
9702        return ret;
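        /*
         * Rough sketch of the helpers used above (for orientation):
         * process_sigsuspend_mask() converts the guest mask into per-thread
         * storage that stays valid while the thread is parked in
         * safe_rt_sigsuspend(), and finish_sigsuspend_mask() drops that
         * state again unless the call has to be restarted.  The safe_
         * wrapper lets QEMU's own host signals interrupt the wait without
         * losing the guest's restart semantics.
         */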
9703#ifdef TARGET_NR_rt_sigtimedwait
9704    case TARGET_NR_rt_sigtimedwait:
9705        {
9706            sigset_t set;
9707            struct timespec uts, *puts;
9708            siginfo_t uinfo;
9709
9710            if (arg4 != sizeof(target_sigset_t)) {
9711                return -TARGET_EINVAL;
9712            }
9713
9714            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9715                return -TARGET_EFAULT;
9716            target_to_host_sigset(&set, p);
9717            unlock_user(p, arg1, 0);
9718            if (arg3) {
9719                puts = &uts;
9720                if (target_to_host_timespec(puts, arg3)) {
9721                    return -TARGET_EFAULT;
9722                }
9723            } else {
9724                puts = NULL;
9725            }
9726            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9727                                                 SIGSET_T_SIZE));
9728            if (!is_error(ret)) {
9729                if (arg2) {
9730                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9731                                  0);
9732                    if (!p) {
9733                        return -TARGET_EFAULT;
9734                    }
9735                    host_to_target_siginfo(p, &uinfo);
9736                    unlock_user(p, arg2, sizeof(target_siginfo_t));
9737                }
9738                ret = host_to_target_signal(ret);
9739            }
9740        }
9741        return ret;
9742#endif
9743#ifdef TARGET_NR_rt_sigtimedwait_time64
9744    case TARGET_NR_rt_sigtimedwait_time64:
9745        {
9746            sigset_t set;
9747            struct timespec uts, *puts;
9748            siginfo_t uinfo;
9749
9750            if (arg4 != sizeof(target_sigset_t)) {
9751                return -TARGET_EINVAL;
9752            }
9753
9754            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9755            if (!p) {
9756                return -TARGET_EFAULT;
9757            }
9758            target_to_host_sigset(&set, p);
9759            unlock_user(p, arg1, 0);
9760            if (arg3) {
9761                puts = &uts;
9762                if (target_to_host_timespec64(puts, arg3)) {
9763                    return -TARGET_EFAULT;
9764                }
9765            } else {
9766                puts = NULL;
9767            }
9768            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9769                                                 SIGSET_T_SIZE));
9770            if (!is_error(ret)) {
9771                if (arg2) {
9772                    p = lock_user(VERIFY_WRITE, arg2,
9773                                  sizeof(target_siginfo_t), 0);
9774                    if (!p) {
9775                        return -TARGET_EFAULT;
9776                    }
9777                    host_to_target_siginfo(p, &uinfo);
9778                    unlock_user(p, arg2, sizeof(target_siginfo_t));
9779                }
9780                ret = host_to_target_signal(ret);
9781            }
9782        }
9783        return ret;
9784#endif
9785    case TARGET_NR_rt_sigqueueinfo:
9786        {
9787            siginfo_t uinfo;
9788
9789            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9790            if (!p) {
9791                return -TARGET_EFAULT;
9792            }
9793            target_to_host_siginfo(&uinfo, p);
9794            unlock_user(p, arg3, 0);
9795            ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9796        }
9797        return ret;
9798    case TARGET_NR_rt_tgsigqueueinfo:
9799        {
9800            siginfo_t uinfo;
9801
9802            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9803            if (!p) {
9804                return -TARGET_EFAULT;
9805            }
9806            target_to_host_siginfo(&uinfo, p);
9807            unlock_user(p, arg4, 0);
9808            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9809        }
9810        return ret;
9811#ifdef TARGET_NR_sigreturn
9812    case TARGET_NR_sigreturn:
9813        if (block_signals()) {
9814            return -QEMU_ERESTARTSYS;
9815        }
9816        return do_sigreturn(cpu_env);
9817#endif
9818    case TARGET_NR_rt_sigreturn:
9819        if (block_signals()) {
9820            return -QEMU_ERESTARTSYS;
9821        }
9822        return do_rt_sigreturn(cpu_env);
9823    case TARGET_NR_sethostname:
9824        if (!(p = lock_user_string(arg1)))
9825            return -TARGET_EFAULT;
9826        ret = get_errno(sethostname(p, arg2));
9827        unlock_user(p, arg1, 0);
9828        return ret;
9829#ifdef TARGET_NR_setrlimit
9830    case TARGET_NR_setrlimit:
9831        {
9832            int resource = target_to_host_resource(arg1);
9833            struct target_rlimit *target_rlim;
9834            struct rlimit rlim;
9835            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9836                return -TARGET_EFAULT;
9837            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9838            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9839            unlock_user_struct(target_rlim, arg2, 0);
9840            /*
9841             * If we just passed through resource limit settings for memory then
9842             * they would also apply to QEMU's own allocations, and QEMU will
9843             * crash or hang or die if its allocations fail. Ideally we would
9844             * track the guest allocations in QEMU and apply the limits ourselves.
9845             * For now, just tell the guest the call succeeded but don't actually
9846             * limit anything.
9847             */
9848            if (resource != RLIMIT_AS &&
9849                resource != RLIMIT_DATA &&
9850                resource != RLIMIT_STACK) {
9851                return get_errno(setrlimit(resource, &rlim));
9852            } else {
9853                return 0;
9854            }
9855        }
9856#endif
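        /*
         * Behavioural example (illustration): a guest
         * setrlimit(RLIMIT_NOFILE, &rl) is passed through and really lowers
         * the host fd limit, whereas setrlimit(RLIMIT_AS/DATA/STACK, ...)
         * reports success without changing anything, for the reason given
         * in the comment above.  Since getrlimit below reads the host
         * limits directly, a guest that sets RLIMIT_AS and reads it back
         * may not see its own value echoed.
         */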
9857#ifdef TARGET_NR_getrlimit
9858    case TARGET_NR_getrlimit:
9859        {
9860            int resource = target_to_host_resource(arg1);
9861            struct target_rlimit *target_rlim;
9862            struct rlimit rlim;
9863
9864            ret = get_errno(getrlimit(resource, &rlim));
9865            if (!is_error(ret)) {
9866                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9867                    return -TARGET_EFAULT;
9868                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9869                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9870                unlock_user_struct(target_rlim, arg2, 1);
9871            }
9872        }
9873        return ret;
9874#endif
9875    case TARGET_NR_getrusage:
9876        {
9877            struct rusage rusage;
9878            ret = get_errno(getrusage(arg1, &rusage));
9879            if (!is_error(ret)) {
9880                ret = host_to_target_rusage(arg2, &rusage);
9881            }
9882        }
9883        return ret;
9884#if defined(TARGET_NR_gettimeofday)
9885    case TARGET_NR_gettimeofday:
9886        {
9887            struct timeval tv;
9888            struct timezone tz;
9889
9890            ret = get_errno(gettimeofday(&tv, &tz));
9891            if (!is_error(ret)) {
9892                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9893                    return -TARGET_EFAULT;
9894                }
9895                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9896                    return -TARGET_EFAULT;
9897                }
9898            }
9899        }
9900        return ret;
9901#endif
9902#if defined(TARGET_NR_settimeofday)
9903    case TARGET_NR_settimeofday:
9904        {
9905            struct timeval tv, *ptv = NULL;
9906            struct timezone tz, *ptz = NULL;
9907
9908            if (arg1) {
9909                if (copy_from_user_timeval(&tv, arg1)) {
9910                    return -TARGET_EFAULT;
9911                }
9912                ptv = &tv;
9913            }
9914
9915            if (arg2) {
9916                if (copy_from_user_timezone(&tz, arg2)) {
9917                    return -TARGET_EFAULT;
9918                }
9919                ptz = &tz;
9920            }
9921
9922            return get_errno(settimeofday(ptv, ptz));
9923        }
9924#endif
9925#if defined(TARGET_NR_select)
9926    case TARGET_NR_select:
9927#if defined(TARGET_WANT_NI_OLD_SELECT)
9928        /* some architectures used to have old_select here
9929         * but now return ENOSYS for it.
9930         */
9931        ret = -TARGET_ENOSYS;
9932#elif defined(TARGET_WANT_OLD_SYS_SELECT)
9933        ret = do_old_select(arg1);
9934#else
9935        ret = do_select(arg1, arg2, arg3, arg4, arg5);
9936#endif
9937        return ret;
9938#endif
9939#ifdef TARGET_NR_pselect6
9940    case TARGET_NR_pselect6:
9941        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9942#endif
9943#ifdef TARGET_NR_pselect6_time64
9944    case TARGET_NR_pselect6_time64:
9945        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9946#endif
9947#ifdef TARGET_NR_symlink
9948    case TARGET_NR_symlink:
9949        {
9950            void *p2;
9951            p = lock_user_string(arg1);
9952            p2 = lock_user_string(arg2);
9953            if (!p || !p2)
9954                ret = -TARGET_EFAULT;
9955            else
9956                ret = get_errno(symlink(p, p2));
9957            unlock_user(p2, arg2, 0);
9958            unlock_user(p, arg1, 0);
9959        }
9960        return ret;
9961#endif
9962#if defined(TARGET_NR_symlinkat)
9963    case TARGET_NR_symlinkat:
9964        {
9965            void *p2;
9966            p  = lock_user_string(arg1);
9967            p2 = lock_user_string(arg3);
9968            if (!p || !p2)
9969                ret = -TARGET_EFAULT;
9970            else
9971                ret = get_errno(symlinkat(p, arg2, p2));
9972            unlock_user(p2, arg3, 0);
9973            unlock_user(p, arg1, 0);
9974        }
9975        return ret;
9976#endif
9977#ifdef TARGET_NR_readlink
9978    case TARGET_NR_readlink:
9979        {
9980            void *p2;
9981            p = lock_user_string(arg1);
9982            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9983            if (!p || !p2) {
9984                ret = -TARGET_EFAULT;
9985            } else if (!arg3) {
9986                /* Short circuit this for the magic exe check. */
9987                ret = -TARGET_EINVAL;
9988            } else if (is_proc_myself((const char *)p, "exe")) {
9989                char real[PATH_MAX], *temp;
9990                temp = realpath(exec_path, real);
9991                /* Return value is # of bytes that we wrote to the buffer. */
9992                if (temp == NULL) {
9993                    ret = get_errno(-1);
9994                } else {
9995                    /* Don't worry about sign mismatch as earlier mapping
9996                     * logic would have thrown a bad address error. */
9997                    ret = MIN(strlen(real), arg3);
9998                    /* We cannot NUL terminate the string. */
9999                    memcpy(p2, real, ret);
10000                }
10001            } else {
10002                ret = get_errno(readlink(path(p), p2, arg3));
10003            }
10004            unlock_user(p2, arg2, ret);
10005            unlock_user(p, arg1, 0);
10006        }
10007        return ret;
10008#endif
10009#if defined(TARGET_NR_readlinkat)
10010    case TARGET_NR_readlinkat:
10011        {
10012            void *p2;
10013            p  = lock_user_string(arg2);
10014            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10015            if (!p || !p2) {
10016                ret = -TARGET_EFAULT;
10017            } else if (!arg4) {
10018                /* Short circuit this for the magic exe check. */
10019                ret = -TARGET_EINVAL;
10020            } else if (is_proc_myself((const char *)p, "exe")) {
10021                char real[PATH_MAX], *temp;
10022                temp = realpath(exec_path, real);
10023                /* Return value is # of bytes that we wrote to the buffer. */
10024                if (temp == NULL) {
10025                    ret = get_errno(-1);
10026                } else {
10027                    /* Don't worry about sign mismatch as earlier mapping
10028                     * logic would have thrown a bad address error. */
10029                    ret = MIN(strlen(real), arg4);
10030                    /* We cannot NUL terminate the string. */
10031                    memcpy(p2, real, ret);
10032                }
10033            } else {
10034                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10035            }
10036            unlock_user(p2, arg3, ret);
10037            unlock_user(p, arg2, 0);
10038        }
10039        return ret;
10040#endif
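        /*
         * Note (illustration): readlink("/proc/self/exe", ...) and its
         * readlinkat() equivalent are intercepted above so the guest sees
         * the path of the emulated binary (exec_path) rather than the QEMU
         * executable.  As with the kernel, the copied result is truncated
         * to the caller's buffer length and is not NUL-terminated.
         */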
10041#ifdef TARGET_NR_swapon
10042    case TARGET_NR_swapon:
10043        if (!(p = lock_user_string(arg1)))
10044            return -TARGET_EFAULT;
10045        ret = get_errno(swapon(p, arg2));
10046        unlock_user(p, arg1, 0);
10047        return ret;
10048#endif
10049    case TARGET_NR_reboot:
10050        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10051           /* arg4 is only used with RESTART2; it must be ignored otherwise */
10052           p = lock_user_string(arg4);
10053           if (!p) {
10054               return -TARGET_EFAULT;
10055           }
10056           ret = get_errno(reboot(arg1, arg2, arg3, p));
10057           unlock_user(p, arg4, 0);
10058        } else {
10059           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10060        }
10061        return ret;
10062#ifdef TARGET_NR_mmap
10063    case TARGET_NR_mmap:
10064#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10065    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10066    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10067    || defined(TARGET_S390X)
10068        {
10069            abi_ulong *v;
10070            abi_ulong v1, v2, v3, v4, v5, v6;
10071            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10072                return -TARGET_EFAULT;
10073            v1 = tswapal(v[0]);
10074            v2 = tswapal(v[1]);
10075            v3 = tswapal(v[2]);
10076            v4 = tswapal(v[3]);
10077            v5 = tswapal(v[4]);
10078            v6 = tswapal(v[5]);
10079            unlock_user(v, arg1, 0);
10080            ret = get_errno(target_mmap(v1, v2, v3,
10081                                        target_to_host_bitmask(v4, mmap_flags_tbl),
10082                                        v5, v6));
10083        }
10084#else
10085        /* mmap pointers are always untagged */
10086        ret = get_errno(target_mmap(arg1, arg2, arg3,
10087                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
10088                                    arg5,
10089                                    arg6));
10090#endif
10091        return ret;
10092#endif
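        /*
         * Background (illustration): on the 32-bit targets listed above the
         * legacy mmap syscall takes a single guest pointer to a block of six
         * abi_ulong values (addr, len, prot, flags, fd, offset) rather than
         * six register arguments, hence the lock_user()/tswapal() unpacking.
         * A guest-side call is roughly (sketch, not verbatim libc code):
         *
         *     abi_ulong args[6] = { addr, len, prot, flags, fd, offset };
         *     syscall(__NR_mmap, args);
         */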
10093#ifdef TARGET_NR_mmap2
10094    case TARGET_NR_mmap2:
10095#ifndef MMAP_SHIFT
10096#define MMAP_SHIFT 12
10097#endif
10098        ret = target_mmap(arg1, arg2, arg3,
10099                          target_to_host_bitmask(arg4, mmap_flags_tbl),
10100                          arg5, arg6 << MMAP_SHIFT);
10101        return get_errno(ret);
10102#endif
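        /*
         * Worked example (illustration): mmap2 passes the file offset in
         * units of 1 << MMAP_SHIFT bytes (4096 unless the target overrides
         * it), so a guest offset argument of 0x10 maps file offset
         * 0x10 << 12 = 0x10000.  This is what lets 32-bit guests map file
         * offsets beyond 4 GiB.
         */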
10103    case TARGET_NR_munmap:
10104        arg1 = cpu_untagged_addr(cpu, arg1);
10105        return get_errno(target_munmap(arg1, arg2));
10106    case TARGET_NR_mprotect:
10107        arg1 = cpu_untagged_addr(cpu, arg1);
10108        {
10109            TaskState *ts = cpu->opaque;
10110            /* Special hack to detect libc making the stack executable.  */
10111            if ((arg3 & PROT_GROWSDOWN)
10112                && arg1 >= ts->info->stack_limit
10113                && arg1 <= ts->info->start_stack) {
10114                arg3 &= ~PROT_GROWSDOWN;
10115                arg2 = arg2 + arg1 - ts->info->stack_limit;
10116                arg1 = ts->info->stack_limit;
10117            }
10118        }
10119        return get_errno(target_mprotect(arg1, arg2, arg3));
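        /*
         * Stack-growth sketch (illustration): when the guest libc re-marks
         * its stack executable it calls mprotect() with PROT_GROWSDOWN and
         * an address somewhere inside the stack.  Because the host mapping
         * backing the guest stack was created up-front, the hack above
         * drops PROT_GROWSDOWN and stretches the range down to stack_limit
         * so the whole guest stack receives the new protection.
         */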
10120#ifdef TARGET_NR_mremap
10121    case TARGET_NR_mremap:
10122        arg1 = cpu_untagged_addr(cpu, arg1);
10123        /* mremap new_addr (arg5) is always untagged */
10124        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10125#endif
10126        /* ??? msync/mlock/munlock are broken for softmmu.  */
10127#ifdef TARGET_NR_msync
10128    case TARGET_NR_msync:
10129        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10130#endif
10131#ifdef TARGET_NR_mlock
10132    case TARGET_NR_mlock:
10133        return get_errno(mlock(g2h(cpu, arg1), arg2));
10134#endif
10135#ifdef TARGET_NR_munlock
10136    case TARGET_NR_munlock:
10137        return get_errno(munlock(g2h(cpu, arg1), arg2));
10138#endif
10139#ifdef TARGET_NR_mlockall
10140    case TARGET_NR_mlockall:
10141        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10142#endif
10143#ifdef TARGET_NR_munlockall
10144    case TARGET_NR_munlockall:
10145        return get_errno(munlockall());
10146#endif
10147#ifdef TARGET_NR_truncate
10148    case TARGET_NR_truncate:
10149        if (!(p = lock_user_string(arg1)))
10150            return -TARGET_EFAULT;
10151        ret = get_errno(truncate(p, arg2));
10152        unlock_user(p, arg1, 0);
10153        return ret;
10154#endif
10155#ifdef TARGET_NR_ftruncate
10156    case TARGET_NR_ftruncate:
10157        return get_errno(ftruncate(arg1, arg2));
10158#endif
10159    case TARGET_NR_fchmod:
10160        return get_errno(fchmod(arg1, arg2));
10161#if defined(TARGET_NR_fchmodat)
10162    case TARGET_NR_fchmodat:
10163        if (!(p = lock_user_string(arg2)))
10164            return -TARGET_EFAULT;
10165        ret = get_errno(fchmodat(arg1, p, arg3, 0));
10166        unlock_user(p, arg2, 0);
10167        return ret;
10168#endif
10169    case TARGET_NR_getpriority:
10170        /* Note that negative values are valid for getpriority, so we must
10171           differentiate based on errno settings.  */
10172        errno = 0;
10173        ret = getpriority(arg1, arg2);
10174        if (ret == -1 && errno != 0) {
10175            return -host_to_target_errno(errno);
10176        }
10177#ifdef TARGET_ALPHA
10178        /* Return value is the unbiased priority.  Signal no error.  */
10179        cpu_env->ir[IR_V0] = 0;
10180#else
10181        /* Return value is a biased priority to avoid negative numbers.  */
10182        ret = 20 - ret;
10183#endif
10184        return ret;
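        /*
         * Worked example (illustration): for a process at nice -5 the host
         * getpriority() returns -5.  Most targets then hand the guest
         * 20 - (-5) = 25, matching the kernel ABI in which the biased range
         * 1..40 cannot collide with negative errno values; the guest libc
         * subtracts the bias again.  Alpha instead returns the unbiased
         * value and clears the error flag in IR_V0, following its native
         * syscall convention.
         */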
10185    case TARGET_NR_setpriority:
10186        return get_errno(setpriority(arg1, arg2, arg3));
10187#ifdef TARGET_NR_statfs
10188    case TARGET_NR_statfs:
10189        if (!(p = lock_user_string(arg1))) {
10190            return -TARGET_EFAULT;
10191        }
10192        ret = get_errno(statfs(path(p), &stfs));
10193        unlock_user(p, arg1, 0);
10194    convert_statfs:
10195        if (!is_error(ret)) {
10196            struct target_statfs *target_stfs;
10197
10198            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10199                return -TARGET_EFAULT;
10200            __put_user(stfs.f_type, &target_stfs->f_type);
10201            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10202            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10203            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10204            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10205            __put_user(stfs.f_files, &target_stfs->f_files);
10206            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10207            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10208            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10209            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10210            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10211#ifdef _STATFS_F_FLAGS
10212            __put_user(stfs.f_flags, &target_stfs->f_flags);
10213#else
10214            __put_user(0, &target_stfs->f_flags);
10215#endif
10216            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10217            unlock_user_struct(target_stfs, arg2, 1);
10218        }
10219        return ret;
10220#endif
10221#ifdef TARGET_NR_fstatfs
10222    case TARGET_NR_fstatfs:
10223        ret = get_errno(fstatfs(arg1, &stfs));
10224        goto convert_statfs;
10225#endif
10226#ifdef TARGET_NR_statfs64
10227    case TARGET_NR_statfs64:
10228        if (!(p = lock_user_string(arg1))) {
10229            return -TARGET_EFAULT;
10230        }
10231        ret = get_errno(statfs(path(p), &stfs));
10232        unlock_user(p, arg1, 0);
10233    convert_statfs64:
10234        if (!is_error(ret)) {
10235            struct target_statfs64 *target_stfs;
10236
10237            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10238                return -TARGET_EFAULT;
10239            __put_user(stfs.f_type, &target_stfs->f_type);
10240            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10241            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10242            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10243            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10244            __put_user(stfs.f_files, &target_stfs->f_files);
10245            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10246            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10247            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10248            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10249            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10250#ifdef _STATFS_F_FLAGS
10251            __put_user(stfs.f_flags, &target_stfs->f_flags);
10252#else
10253            __put_user(0, &target_stfs->f_flags);
10254#endif
10255            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10256            unlock_user_struct(target_stfs, arg3, 1);
10257        }
10258        return ret;
10259    case TARGET_NR_fstatfs64:
10260        ret = get_errno(fstatfs(arg1, &stfs));
10261        goto convert_statfs64;
10262#endif
10263#ifdef TARGET_NR_socketcall
10264    case TARGET_NR_socketcall:
10265        return do_socketcall(arg1, arg2);
10266#endif
10267#ifdef TARGET_NR_accept
10268    case TARGET_NR_accept:
10269        return do_accept4(arg1, arg2, arg3, 0);
10270#endif
10271#ifdef TARGET_NR_accept4
10272    case TARGET_NR_accept4:
10273        return do_accept4(arg1, arg2, arg3, arg4);
10274#endif
10275#ifdef TARGET_NR_bind
10276    case TARGET_NR_bind:
10277        return do_bind(arg1, arg2, arg3);
10278#endif
10279#ifdef TARGET_NR_connect
10280    case TARGET_NR_connect:
10281        return do_connect(arg1, arg2, arg3);
10282#endif
10283#ifdef TARGET_NR_getpeername
10284    case TARGET_NR_getpeername:
10285        return do_getpeername(arg1, arg2, arg3);
10286#endif
10287#ifdef TARGET_NR_getsockname
10288    case TARGET_NR_getsockname:
10289        return do_getsockname(arg1, arg2, arg3);
10290#endif
10291#ifdef TARGET_NR_getsockopt
10292    case TARGET_NR_getsockopt:
10293        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10294#endif
10295#ifdef TARGET_NR_listen
10296    case TARGET_NR_listen:
10297        return get_errno(listen(arg1, arg2));
10298#endif
10299#ifdef TARGET_NR_recv
10300    case TARGET_NR_recv:
10301        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10302#endif
10303#ifdef TARGET_NR_recvfrom
10304    case TARGET_NR_recvfrom:
10305        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10306#endif
10307#ifdef TARGET_NR_recvmsg
10308    case TARGET_NR_recvmsg:
10309        return do_sendrecvmsg(arg1, arg2, arg3, 0);
10310#endif
10311#ifdef TARGET_NR_send
10312    case TARGET_NR_send:
10313        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10314#endif
10315#ifdef TARGET_NR_sendmsg
10316    case TARGET_NR_sendmsg:
10317        return do_sendrecvmsg(arg1, arg2, arg3, 1);
10318#endif
10319#ifdef TARGET_NR_sendmmsg
10320    case TARGET_NR_sendmmsg:
10321        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10322#endif
10323#ifdef TARGET_NR_recvmmsg
10324    case TARGET_NR_recvmmsg:
10325        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10326#endif
10327#ifdef TARGET_NR_sendto
10328    case TARGET_NR_sendto:
10329        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10330#endif
10331#ifdef TARGET_NR_shutdown
10332    case TARGET_NR_shutdown:
10333        return get_errno(shutdown(arg1, arg2));
10334#endif
10335#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10336    case TARGET_NR_getrandom:
10337        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10338        if (!p) {
10339            return -TARGET_EFAULT;
10340        }
10341        ret = get_errno(getrandom(p, arg2, arg3));
10342        unlock_user(p, arg1, ret);
10343        return ret;
10344#endif
10345#ifdef TARGET_NR_socket
10346    case TARGET_NR_socket:
10347        return do_socket(arg1, arg2, arg3);
10348#endif
10349#ifdef TARGET_NR_socketpair
10350    case TARGET_NR_socketpair:
10351        return do_socketpair(arg1, arg2, arg3, arg4);
10352#endif
10353#ifdef TARGET_NR_setsockopt
10354    case TARGET_NR_setsockopt:
10355        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10356#endif
10357#if defined(TARGET_NR_syslog)
10358    case TARGET_NR_syslog:
10359        {
10360            int len = arg2;
10361
10362            switch (arg1) {
10363            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10364            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10365            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10366            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10367            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10368            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10369            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10370            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10371                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10372            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10373            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10374            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10375                {
10376                    if (len < 0) {
10377                        return -TARGET_EINVAL;
10378                    }
10379                    if (len == 0) {
10380                        return 0;
10381                    }
10382                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10383                    if (!p) {
10384                        return -TARGET_EFAULT;
10385                    }
10386                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10387                    unlock_user(p, arg2, arg3);
10388                }
10389                return ret;
10390            default:
10391                return -TARGET_EINVAL;
10392            }
10393        }
10394        break;
10395#endif
10396    case TARGET_NR_setitimer:
10397        {
10398            struct itimerval value, ovalue, *pvalue;
10399
10400            if (arg2) {
10401                pvalue = &value;
10402                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10403                    || copy_from_user_timeval(&pvalue->it_value,
10404                                              arg2 + sizeof(struct target_timeval)))
10405                    return -TARGET_EFAULT;
10406            } else {
10407                pvalue = NULL;
10408            }
10409            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10410            if (!is_error(ret) && arg3) {
10411                if (copy_to_user_timeval(arg3,
10412                                         &ovalue.it_interval)
10413                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10414                                            &ovalue.it_value))
10415                    return -TARGET_EFAULT;
10416            }
10417        }
10418        return ret;
10419    case TARGET_NR_getitimer:
10420        {
10421            struct itimerval value;
10422
10423            ret = get_errno(getitimer(arg1, &value));
10424            if (!is_error(ret) && arg2) {
10425                if (copy_to_user_timeval(arg2,
10426                                         &value.it_interval)
10427                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10428                                            &value.it_value))
10429                    return -TARGET_EFAULT;
10430            }
10431        }
10432        return ret;
10433#ifdef TARGET_NR_stat
10434    case TARGET_NR_stat:
10435        if (!(p = lock_user_string(arg1))) {
10436            return -TARGET_EFAULT;
10437        }
10438        ret = get_errno(stat(path(p), &st));
10439        unlock_user(p, arg1, 0);
10440        goto do_stat;
10441#endif
10442#ifdef TARGET_NR_lstat
10443    case TARGET_NR_lstat:
10444        if (!(p = lock_user_string(arg1))) {
10445            return -TARGET_EFAULT;
10446        }
10447        ret = get_errno(lstat(path(p), &st));
10448        unlock_user(p, arg1, 0);
10449        goto do_stat;
10450#endif
10451#ifdef TARGET_NR_fstat
10452    case TARGET_NR_fstat:
10453        {
10454            ret = get_errno(fstat(arg1, &st));
10455#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10456        do_stat:
10457#endif
10458            if (!is_error(ret)) {
10459                struct target_stat *target_st;
10460
10461                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10462                    return -TARGET_EFAULT;
10463                memset(target_st, 0, sizeof(*target_st));
10464                __put_user(st.st_dev, &target_st->st_dev);
10465                __put_user(st.st_ino, &target_st->st_ino);
10466                __put_user(st.st_mode, &target_st->st_mode);
10467                __put_user(st.st_uid, &target_st->st_uid);
10468                __put_user(st.st_gid, &target_st->st_gid);
10469                __put_user(st.st_nlink, &target_st->st_nlink);
10470                __put_user(st.st_rdev, &target_st->st_rdev);
10471                __put_user(st.st_size, &target_st->st_size);
10472                __put_user(st.st_blksize, &target_st->st_blksize);
10473                __put_user(st.st_blocks, &target_st->st_blocks);
10474                __put_user(st.st_atime, &target_st->target_st_atime);
10475                __put_user(st.st_mtime, &target_st->target_st_mtime);
10476                __put_user(st.st_ctime, &target_st->target_st_ctime);
10477#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10478                __put_user(st.st_atim.tv_nsec,
10479                           &target_st->target_st_atime_nsec);
10480                __put_user(st.st_mtim.tv_nsec,
10481                           &target_st->target_st_mtime_nsec);
10482                __put_user(st.st_ctim.tv_nsec,
10483                           &target_st->target_st_ctime_nsec);
10484#endif
10485                unlock_user_struct(target_st, arg2, 1);
10486            }
10487        }
10488        return ret;
10489#endif
10490    case TARGET_NR_vhangup:
10491        return get_errno(vhangup());
10492#ifdef TARGET_NR_syscall
10493    case TARGET_NR_syscall:
10494        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10495                          arg6, arg7, arg8, 0);
10496#endif
10497#if defined(TARGET_NR_wait4)
10498    case TARGET_NR_wait4:
10499        {
10500            int status;
10501            abi_long status_ptr = arg2;
10502            struct rusage rusage, *rusage_ptr;
10503            abi_ulong target_rusage = arg4;
10504            abi_long rusage_err;
10505            if (target_rusage)
10506                rusage_ptr = &rusage;
10507            else
10508                rusage_ptr = NULL;
10509            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10510            if (!is_error(ret)) {
10511                if (status_ptr && ret) {
10512                    status = host_to_target_waitstatus(status);
10513                    if (put_user_s32(status, status_ptr))
10514                        return -TARGET_EFAULT;
10515                }
10516                if (target_rusage) {
10517                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
10518                    if (rusage_err) {
10519                        ret = rusage_err;
10520                    }
10521                }
10522            }
10523        }
10524        return ret;
10525#endif
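        /*
         * Note (illustration): the raw host wait status is re-encoded with
         * host_to_target_waitstatus() because any signal number embedded in
         * it (e.g. for a child killed or stopped by a signal) uses host
         * numbering.  The status is only written back when a child was
         * actually reaped (ret != 0), and a failure converting the rusage
         * buffer overrides the syscall result.
         */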
10526#ifdef TARGET_NR_swapoff
10527    case TARGET_NR_swapoff:
10528        if (!(p = lock_user_string(arg1)))
10529            return -TARGET_EFAULT;
10530        ret = get_errno(swapoff(p));
10531        unlock_user(p, arg1, 0);
10532        return ret;
10533#endif
10534    case TARGET_NR_sysinfo:
10535        {
10536            struct target_sysinfo *target_value;
10537            struct sysinfo value;
10538            ret = get_errno(sysinfo(&value));
10539            if (!is_error(ret) && arg1)
10540            {
10541                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10542                    return -TARGET_EFAULT;
10543                __put_user(value.uptime, &target_value->uptime);
10544                __put_user(value.loads[0], &target_value->loads[0]);
10545                __put_user(value.loads[1], &target_value->loads[1]);
10546                __put_user(value.loads[2], &target_value->loads[2]);
10547                __put_user(value.totalram, &target_value->totalram);
10548                __put_user(value.freeram, &target_value->freeram);
10549                __put_user(value.sharedram, &target_value->sharedram);
10550                __put_user(value.bufferram, &target_value->bufferram);
10551                __put_user(value.totalswap, &target_value->totalswap);
10552                __put_user(value.freeswap, &target_value->freeswap);
10553                __put_user(value.procs, &target_value->procs);
10554                __put_user(value.totalhigh, &target_value->totalhigh);
10555                __put_user(value.freehigh, &target_value->freehigh);
10556                __put_user(value.mem_unit, &target_value->mem_unit);
10557                unlock_user_struct(target_value, arg1, 1);
10558            }
10559        }
10560        return ret;
10561#ifdef TARGET_NR_ipc
10562    case TARGET_NR_ipc:
10563        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10564#endif
10565#ifdef TARGET_NR_semget
10566    case TARGET_NR_semget:
10567        return get_errno(semget(arg1, arg2, arg3));
10568#endif
10569#ifdef TARGET_NR_semop
10570    case TARGET_NR_semop:
10571        return do_semtimedop(arg1, arg2, arg3, 0, false);
10572#endif
10573#ifdef TARGET_NR_semtimedop
10574    case TARGET_NR_semtimedop:
10575        return do_semtimedop(arg1, arg2, arg3, arg4, false);
10576#endif
10577#ifdef TARGET_NR_semtimedop_time64
10578    case TARGET_NR_semtimedop_time64:
10579        return do_semtimedop(arg1, arg2, arg3, arg4, true);
10580#endif
10581#ifdef TARGET_NR_semctl
10582    case TARGET_NR_semctl:
10583        return do_semctl(arg1, arg2, arg3, arg4);
10584#endif
10585#ifdef TARGET_NR_msgctl
10586    case TARGET_NR_msgctl:
10587        return do_msgctl(arg1, arg2, arg3);
10588#endif
10589#ifdef TARGET_NR_msgget
10590    case TARGET_NR_msgget:
10591        return get_errno(msgget(arg1, arg2));
10592#endif
10593#ifdef TARGET_NR_msgrcv
10594    case TARGET_NR_msgrcv:
10595        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10596#endif
10597#ifdef TARGET_NR_msgsnd
10598    case TARGET_NR_msgsnd:
10599        return do_msgsnd(arg1, arg2, arg3, arg4);
10600#endif
10601#ifdef TARGET_NR_shmget
10602    case TARGET_NR_shmget:
10603        return get_errno(shmget(arg1, arg2, arg3));
10604#endif
10605#ifdef TARGET_NR_shmctl
10606    case TARGET_NR_shmctl:
10607        return do_shmctl(arg1, arg2, arg3);
10608#endif
10609#ifdef TARGET_NR_shmat
10610    case TARGET_NR_shmat:
10611        return do_shmat(cpu_env, arg1, arg2, arg3);
10612#endif
10613#ifdef TARGET_NR_shmdt
10614    case TARGET_NR_shmdt:
10615        return do_shmdt(arg1);
10616#endif
10617    case TARGET_NR_fsync:
10618        return get_errno(fsync(arg1));
10619    case TARGET_NR_clone:
10620        /* Linux manages to have three different orderings for its
10621         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10622         * match the kernel's CONFIG_CLONE_* settings.
10623         * Microblaze is further special in that it uses a sixth
10624         * implicit argument to clone for the TLS pointer.
10625         */
10626#if defined(TARGET_MICROBLAZE)
10627        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10628#elif defined(TARGET_CLONE_BACKWARDS)
10629        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10630#elif defined(TARGET_CLONE_BACKWARDS2)
10631        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10632#else
10633        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10634#endif
10635        return ret;
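        /*
         * Ordering sketch (illustration), assuming do_fork() takes
         * (flags, sp, parent_tidptr, tls, child_tidptr) as declared earlier
         * in this file: the default guest order is (flags, sp,
         * parent_tidptr, child_tidptr, tls); TARGET_CLONE_BACKWARDS swaps
         * the last two, giving (flags, sp, parent_tidptr, tls,
         * child_tidptr); TARGET_CLONE_BACKWARDS2 instead swaps the first
         * two, giving (sp, flags, parent_tidptr, child_tidptr, tls).  The
         * shuffling above just normalises whichever order the target uses.
         */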
10636#ifdef __NR_exit_group
10637        /* new thread calls */
10638    case TARGET_NR_exit_group:
10639        preexit_cleanup(cpu_env, arg1);
10640        return get_errno(exit_group(arg1));
10641#endif
10642    case TARGET_NR_setdomainname:
10643        if (!(p = lock_user_string(arg1)))
10644            return -TARGET_EFAULT;
10645        ret = get_errno(setdomainname(p, arg2));
10646        unlock_user(p, arg1, 0);
10647        return ret;
10648    case TARGET_NR_uname:
10649        /* no need to transcode because we use the linux syscall */
10650        {
10651            struct new_utsname * buf;
10652
10653            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10654                return -TARGET_EFAULT;
10655            ret = get_errno(sys_uname(buf));
10656            if (!is_error(ret)) {
10657                /* Overwrite the native machine name with whatever is being
10658                   emulated. */
10659                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10660                          sizeof(buf->machine));
10661                /* Allow the user to override the reported release.  */
10662                if (qemu_uname_release && *qemu_uname_release) {
10663                    g_strlcpy(buf->release, qemu_uname_release,
10664                              sizeof(buf->release));
10665                }
10666            }
10667            unlock_user_struct(buf, arg1, 1);
10668        }
10669        return ret;
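        /*
         * Example (illustration): for an aarch64 guest on an x86_64 host
         * the host uname() reports machine "x86_64"; cpu_to_uname_machine()
         * rewrites that to the emulated value, and the release override
         * (the -r option / QEMU_UNAME environment variable), when set,
         * replaces the reported kernel release so version checks inside the
         * guest see what they expect.
         */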
10670#ifdef TARGET_I386
10671    case TARGET_NR_modify_ldt:
10672        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10673#if !defined(TARGET_X86_64)
10674    case TARGET_NR_vm86:
10675        return do_vm86(cpu_env, arg1, arg2);
10676#endif
10677#endif
10678#if defined(TARGET_NR_adjtimex)
10679    case TARGET_NR_adjtimex:
10680        {
10681            struct timex host_buf;
10682
10683            if (target_to_host_timex(&host_buf, arg1) != 0) {
10684                return -TARGET_EFAULT;
10685            }
10686            ret = get_errno(adjtimex(&host_buf));
10687            if (!is_error(ret)) {
10688                if (host_to_target_timex(arg1, &host_buf) != 0) {
10689                    return -TARGET_EFAULT;
10690                }
10691            }
10692        }
10693        return ret;
10694#endif
10695#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10696    case TARGET_NR_clock_adjtime:
10697        {
10698            struct timex htx, *phtx = &htx;
10699
10700            if (target_to_host_timex(phtx, arg2) != 0) {
10701                return -TARGET_EFAULT;
10702            }
10703            ret = get_errno(clock_adjtime(arg1, phtx));
10704            if (!is_error(ret) && phtx) {
10705                if (host_to_target_timex(arg2, phtx) != 0) {
10706                    return -TARGET_EFAULT;
10707                }
10708            }
10709        }
10710        return ret;
10711#endif
10712#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10713    case TARGET_NR_clock_adjtime64:
10714        {
10715            struct timex htx;
10716
10717            if (target_to_host_timex64(&htx, arg2) != 0) {
10718                return -TARGET_EFAULT;
10719            }
10720            ret = get_errno(clock_adjtime(arg1, &htx));
10721            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10722                    return -TARGET_EFAULT;
10723            }
10724        }
10725        return ret;
10726#endif
10727    case TARGET_NR_getpgid:
10728        return get_errno(getpgid(arg1));
10729    case TARGET_NR_fchdir:
10730        return get_errno(fchdir(arg1));
10731    case TARGET_NR_personality:
10732        return get_errno(personality(arg1));
10733#ifdef TARGET_NR__llseek /* Not on alpha */
10734    case TARGET_NR__llseek:
10735        {
10736            int64_t res;
10737#if !defined(__NR_llseek)
10738            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10739            if (res == -1) {
10740                ret = get_errno(res);
10741            } else {
10742                ret = 0;
10743            }
10744#else
10745            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10746#endif
10747            if ((ret == 0) && put_user_s64(res, arg4)) {
10748                return -TARGET_EFAULT;
10749            }
10750        }
10751        return ret;
10752#endif
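        /*
         * Worked example (illustration): _llseek splits the 64-bit offset
         * into offset_high (arg2) and offset_low (arg3), so an offset of
         * 0x100000000 arrives as arg2 = 1, arg3 = 0 and is reassembled as
         * ((uint64_t)arg2 << 32) | arg3.  The resulting position is written
         * back through the guest pointer in arg4 with put_user_s64() rather
         * than returned in a register; hosts without __NR_llseek fall back
         * to a plain 64-bit lseek().
         */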
10753#ifdef TARGET_NR_getdents
10754    case TARGET_NR_getdents:
10755        return do_getdents(arg1, arg2, arg3);
10756#endif /* TARGET_NR_getdents */
10757#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10758    case TARGET_NR_getdents64:
10759        return do_getdents64(arg1, arg2, arg3);
10760#endif /* TARGET_NR_getdents64 */
10761#if defined(TARGET_NR__newselect)
10762    case TARGET_NR__newselect:
10763        return do_select(arg1, arg2, arg3, arg4, arg5);
10764#endif
10765#ifdef TARGET_NR_poll
10766    case TARGET_NR_poll:
10767        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10768#endif
10769#ifdef TARGET_NR_ppoll
10770    case TARGET_NR_ppoll:
10771        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10772#endif
10773#ifdef TARGET_NR_ppoll_time64
10774    case TARGET_NR_ppoll_time64:
10775        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10776#endif
10777    case TARGET_NR_flock:
10778        /* NOTE: the flock constant seems to be the same for every
10779           Linux platform */
10780        return get_errno(safe_flock(arg1, arg2));
10781    case TARGET_NR_readv:
10782        {
10783            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10784            if (vec != NULL) {
10785                ret = get_errno(safe_readv(arg1, vec, arg3));
10786                unlock_iovec(vec, arg2, arg3, 1);
10787            } else {
10788                ret = -host_to_target_errno(errno);
10789            }
10790        }
10791        return ret;
10792    case TARGET_NR_writev:
10793        {
10794            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10795            if (vec != NULL) {
10796                ret = get_errno(safe_writev(arg1, vec, arg3));
10797                unlock_iovec(vec, arg2, arg3, 0);
10798            } else {
10799                ret = -host_to_target_errno(errno);
10800            }
10801        }
10802        return ret;
10803#if defined(TARGET_NR_preadv)
10804    case TARGET_NR_preadv:
10805        {
10806            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10807            if (vec != NULL) {
10808                unsigned long low, high;
10809
10810                target_to_host_low_high(arg4, arg5, &low, &high);
10811                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10812                unlock_iovec(vec, arg2, arg3, 1);
10813            } else {
10814                ret = -host_to_target_errno(errno);
10815            }
10816        }
10817        return ret;
10818#endif
10819#if defined(TARGET_NR_pwritev)
10820    case TARGET_NR_pwritev:
10821        {
10822            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10823            if (vec != NULL) {
10824                unsigned long low, high;
10825
10826                target_to_host_low_high(arg4, arg5, &low, &high);
10827                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10828                unlock_iovec(vec, arg2, arg3, 0);
10829            } else {
10830                ret = -host_to_target_errno(errno);
10831            }
10832        }
10833        return ret;
10834#endif
10835    case TARGET_NR_getsid:
10836        return get_errno(getsid(arg1));
10837#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10838    case TARGET_NR_fdatasync:
10839        return get_errno(fdatasync(arg1));
10840#endif
10841    case TARGET_NR_sched_getaffinity:
10842        {
10843            unsigned int mask_size;
10844            unsigned long *mask;
10845
10846            /*
10847             * sched_getaffinity needs multiples of ulong, so need to take
10848             * care of mismatches between target ulong and host ulong sizes.
10849             */
10850            if (arg2 & (sizeof(abi_ulong) - 1)) {
10851                return -TARGET_EINVAL;
10852            }
10853            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10854
10855            mask = alloca(mask_size);
10856            memset(mask, 0, mask_size);
10857            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10858
10859            if (!is_error(ret)) {
10860                if (ret > arg2) {
10861                    /* More data returned than the caller's buffer will fit.
10862                     * This only happens if sizeof(abi_long) < sizeof(long)
10863                     * and the caller passed us a buffer holding an odd number
10864                     * of abi_longs. If the host kernel is actually using the
10865                     * extra 4 bytes then fail EINVAL; otherwise we can just
10866                     * ignore them and only copy the interesting part.
10867                     */
10868                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10869                    if (numcpus > arg2 * 8) {
10870                        return -TARGET_EINVAL;
10871                    }
10872                    ret = arg2;
10873                }
10874
10875                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10876                    return -TARGET_EFAULT;
10877                }
10878            }
10879        }
10880        return ret;
10881    case TARGET_NR_sched_setaffinity:
10882        {
10883            unsigned int mask_size;
10884            unsigned long *mask;
10885
10886            /*
10887             * sched_setaffinity needs multiples of ulong, so we need to take
10888             * care of mismatches between target ulong and host ulong sizes.
10889             */
10890            if (arg2 & (sizeof(abi_ulong) - 1)) {
10891                return -TARGET_EINVAL;
10892            }
10893            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10894            mask = alloca(mask_size);
10895
10896            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10897            if (ret) {
10898                return ret;
10899            }
10900
10901            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10902        }
10903    case TARGET_NR_getcpu:
10904        {
10905            unsigned cpu, node;
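            /*
             * The third getcpu() argument is the obsolete, unused tcache
             * pointer, so NULL is always passed for it below.
             */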
10906            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10907                                       arg2 ? &node : NULL,
10908                                       NULL));
10909            if (is_error(ret)) {
10910                return ret;
10911            }
10912            if (arg1 && put_user_u32(cpu, arg1)) {
10913                return -TARGET_EFAULT;
10914            }
10915            if (arg2 && put_user_u32(node, arg2)) {
10916                return -TARGET_EFAULT;
10917            }
10918        }
10919        return ret;
10920    case TARGET_NR_sched_setparam:
10921        {
10922            struct target_sched_param *target_schp;
10923            struct sched_param schp;
10924
10925            if (arg2 == 0) {
10926                return -TARGET_EINVAL;
10927            }
10928            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10929                return -TARGET_EFAULT;
10930            }
10931            schp.sched_priority = tswap32(target_schp->sched_priority);
10932            unlock_user_struct(target_schp, arg2, 0);
10933            return get_errno(sys_sched_setparam(arg1, &schp));
10934        }
10935    case TARGET_NR_sched_getparam:
10936        {
10937            struct target_sched_param *target_schp;
10938            struct sched_param schp;
10939
10940            if (arg2 == 0) {
10941                return -TARGET_EINVAL;
10942            }
10943            ret = get_errno(sys_sched_getparam(arg1, &schp));
10944            if (!is_error(ret)) {
10945                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10946                    return -TARGET_EFAULT;
10947                }
10948                target_schp->sched_priority = tswap32(schp.sched_priority);
10949                unlock_user_struct(target_schp, arg2, 1);
10950            }
10951        }
10952        return ret;
10953    case TARGET_NR_sched_setscheduler:
10954        {
10955            struct target_sched_param *target_schp;
10956            struct sched_param schp;
10957            if (arg3 == 0) {
10958                return -TARGET_EINVAL;
10959            }
10960            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10961                return -TARGET_EFAULT;
10962            }
10963            schp.sched_priority = tswap32(target_schp->sched_priority);
10964            unlock_user_struct(target_schp, arg3, 0);
10965            return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10966        }
10967    case TARGET_NR_sched_getscheduler:
10968        return get_errno(sys_sched_getscheduler(arg1));
10969    case TARGET_NR_sched_getattr:
10970        {
10971            struct target_sched_attr *target_scha;
10972            struct sched_attr scha;
10973            if (arg2 == 0) {
10974                return -TARGET_EINVAL;
10975            }
10976            if (arg3 > sizeof(scha)) {
10977                arg3 = sizeof(scha);
10978            }
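            /*
             * Never ask the kernel for more than our sched_attr can hold;
             * scha.size, as reported back by the kernel, is forwarded to
             * the guest below.
             */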
10979            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10980            if (!is_error(ret)) {
10981                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10982                if (!target_scha) {
10983                    return -TARGET_EFAULT;
10984                }
10985                target_scha->size = tswap32(scha.size);
10986                target_scha->sched_policy = tswap32(scha.sched_policy);
10987                target_scha->sched_flags = tswap64(scha.sched_flags);
10988                target_scha->sched_nice = tswap32(scha.sched_nice);
10989                target_scha->sched_priority = tswap32(scha.sched_priority);
10990                target_scha->sched_runtime = tswap64(scha.sched_runtime);
10991                target_scha->sched_deadline = tswap64(scha.sched_deadline);
10992                target_scha->sched_period = tswap64(scha.sched_period);
10993                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10994                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
10995                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
10996                }
10997                unlock_user(target_scha, arg2, arg3);
10998            }
10999            return ret;
11000        }
11001    case TARGET_NR_sched_setattr:
11002        {
11003            struct target_sched_attr *target_scha;
11004            struct sched_attr scha;
11005            uint32_t size;
11006            int zeroed;
11007            if (arg2 == 0) {
11008                return -TARGET_EINVAL;
11009            }
11010            if (get_user_u32(size, arg2)) {
11011                return -TARGET_EFAULT;
11012            }
11013            if (!size) {
11014                size = offsetof(struct target_sched_attr, sched_util_min);
11015            }
11016            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11017                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11018                    return -TARGET_EFAULT;
11019                }
11020                return -TARGET_E2BIG;
11021            }
11022
11023            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11024            if (zeroed < 0) {
11025                return zeroed;
11026            } else if (zeroed == 0) {
11027                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11028                    return -TARGET_EFAULT;
11029                }
11030                return -TARGET_E2BIG;
11031            }
11032            if (size > sizeof(struct target_sched_attr)) {
11033                size = sizeof(struct target_sched_attr);
11034            }
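            /*
             * The checks above mirror the kernel's own sched_setattr() ABI
             * rules: an undersized struct is rejected with E2BIG (after
             * reporting the expected size back to the guest), an oversized
             * struct is accepted only if its extra tail bytes are zero, and
             * the copy below is then truncated to the fields QEMU knows
             * about.
             */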
11035
11036            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11037            if (!target_scha) {
11038                return -TARGET_EFAULT;
11039            }
11040            scha.size = size;
11041            scha.sched_policy = tswap32(target_scha->sched_policy);
11042            scha.sched_flags = tswap64(target_scha->sched_flags);
11043            scha.sched_nice = tswap32(target_scha->sched_nice);
11044            scha.sched_priority = tswap32(target_scha->sched_priority);
11045            scha.sched_runtime = tswap64(target_scha->sched_runtime);
11046            scha.sched_deadline = tswap64(target_scha->sched_deadline);
11047            scha.sched_period = tswap64(target_scha->sched_period);
11048            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11049                scha.sched_util_min = tswap32(target_scha->sched_util_min);
11050                scha.sched_util_max = tswap32(target_scha->sched_util_max);
11051            }
11052            unlock_user(target_scha, arg2, 0);
11053            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11054        }
11055    case TARGET_NR_sched_yield:
11056        return get_errno(sched_yield());
11057    case TARGET_NR_sched_get_priority_max:
11058        return get_errno(sched_get_priority_max(arg1));
11059    case TARGET_NR_sched_get_priority_min:
11060        return get_errno(sched_get_priority_min(arg1));
11061#ifdef TARGET_NR_sched_rr_get_interval
11062    case TARGET_NR_sched_rr_get_interval:
11063        {
11064            struct timespec ts;
11065            ret = get_errno(sched_rr_get_interval(arg1, &ts));
11066            if (!is_error(ret)) {
11067                ret = host_to_target_timespec(arg2, &ts);
11068            }
11069        }
11070        return ret;
11071#endif
11072#ifdef TARGET_NR_sched_rr_get_interval_time64
11073    case TARGET_NR_sched_rr_get_interval_time64:
11074        {
11075            struct timespec ts;
11076            ret = get_errno(sched_rr_get_interval(arg1, &ts));
11077            if (!is_error(ret)) {
11078                ret = host_to_target_timespec64(arg2, &ts);
11079            }
11080        }
11081        return ret;
11082#endif
11083#if defined(TARGET_NR_nanosleep)
11084    case TARGET_NR_nanosleep:
11085        {
11086            struct timespec req, rem;
11087            target_to_host_timespec(&req, arg1);
11088            ret = get_errno(safe_nanosleep(&req, &rem));
11089            if (is_error(ret) && arg2) {
11090                host_to_target_timespec(arg2, &rem);
11091            }
11092        }
11093        return ret;
11094#endif
11095    case TARGET_NR_prctl:
11096        return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11098#ifdef TARGET_NR_arch_prctl
11099    case TARGET_NR_arch_prctl:
11100        return do_arch_prctl(cpu_env, arg1, arg2);
11101#endif
11102#ifdef TARGET_NR_pread64
11103    case TARGET_NR_pread64:
11104        if (regpairs_aligned(cpu_env, num)) {
11105            arg4 = arg5;
11106            arg5 = arg6;
11107        }
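        /*
         * Some 32-bit ABIs pass 64-bit syscall arguments in aligned
         * register pairs, inserting a padding register first; for those
         * targets the 64-bit offset really lives in arg5/arg6, hence the
         * shift above.
         */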
11108        if (arg2 == 0 && arg3 == 0) {
11109            /* Special-case NULL buffer and zero length, which should succeed */
11110            p = 0;
11111        } else {
11112            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11113            if (!p) {
11114                return -TARGET_EFAULT;
11115            }
11116        }
11117        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11118        unlock_user(p, arg2, ret);
11119        return ret;
11120    case TARGET_NR_pwrite64:
11121        if (regpairs_aligned(cpu_env, num)) {
11122            arg4 = arg5;
11123            arg5 = arg6;
11124        }
11125        if (arg2 == 0 && arg3 == 0) {
11126            /* Special-case NULL buffer and zero length, which should succeed */
11127            p = 0;
11128        } else {
11129            p = lock_user(VERIFY_READ, arg2, arg3, 1);
11130            if (!p) {
11131                return -TARGET_EFAULT;
11132            }
11133        }
11134        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11135        unlock_user(p, arg2, 0);
11136        return ret;
11137#endif
11138    case TARGET_NR_getcwd:
11139        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11140            return -TARGET_EFAULT;
11141        ret = get_errno(sys_getcwd1(p, arg2));
11142        unlock_user(p, arg1, ret);
11143        return ret;
11144    case TARGET_NR_capget:
11145    case TARGET_NR_capset:
11146    {
11147        struct target_user_cap_header *target_header;
11148        struct target_user_cap_data *target_data = NULL;
11149        struct __user_cap_header_struct header;
11150        struct __user_cap_data_struct data[2];
11151        struct __user_cap_data_struct *dataptr = NULL;
11152        int i, target_datalen;
11153        int data_items = 1;
11154
11155        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11156            return -TARGET_EFAULT;
11157        }
11158        header.version = tswap32(target_header->version);
11159        header.pid = tswap32(target_header->pid);
11160
11161        if (header.version != _LINUX_CAPABILITY_VERSION) {
11162            /* Versions 2 and up take a pointer to two user_data structs */
11163            data_items = 2;
11164        }
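        /*
         * The original v1 capability ABI (tested above) uses a single
         * 32-bit data struct; v2 and v3 use an array of two so that all
         * 64 capability bits can be represented.
         */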
11165
11166        target_datalen = sizeof(*target_data) * data_items;
11167
11168        if (arg2) {
11169            if (num == TARGET_NR_capget) {
11170                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11171            } else {
11172                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11173            }
11174            if (!target_data) {
11175                unlock_user_struct(target_header, arg1, 0);
11176                return -TARGET_EFAULT;
11177            }
11178
11179            if (num == TARGET_NR_capset) {
11180                for (i = 0; i < data_items; i++) {
11181                    data[i].effective = tswap32(target_data[i].effective);
11182                    data[i].permitted = tswap32(target_data[i].permitted);
11183                    data[i].inheritable = tswap32(target_data[i].inheritable);
11184                }
11185            }
11186
11187            dataptr = data;
11188        }
11189
11190        if (num == TARGET_NR_capget) {
11191            ret = get_errno(capget(&header, dataptr));
11192        } else {
11193            ret = get_errno(capset(&header, dataptr));
11194        }
11195
11196        /* The kernel always updates version for both capget and capset */
11197        target_header->version = tswap32(header.version);
11198        unlock_user_struct(target_header, arg1, 1);
11199
11200        if (arg2) {
11201            if (num == TARGET_NR_capget) {
11202                for (i = 0; i < data_items; i++) {
11203                    target_data[i].effective = tswap32(data[i].effective);
11204                    target_data[i].permitted = tswap32(data[i].permitted);
11205                    target_data[i].inheritable = tswap32(data[i].inheritable);
11206                }
11207                unlock_user(target_data, arg2, target_datalen);
11208            } else {
11209                unlock_user(target_data, arg2, 0);
11210            }
11211        }
11212        return ret;
11213    }
11214    case TARGET_NR_sigaltstack:
11215        return do_sigaltstack(arg1, arg2, cpu_env);
11216
11217#ifdef CONFIG_SENDFILE
11218#ifdef TARGET_NR_sendfile
11219    case TARGET_NR_sendfile:
11220    {
11221        off_t *offp = NULL;
11222        off_t off;
11223        if (arg3) {
11224            ret = get_user_sal(off, arg3);
11225            if (is_error(ret)) {
11226                return ret;
11227            }
11228            offp = &off;
11229        }
11230        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11231        if (!is_error(ret) && arg3) {
11232            abi_long ret2 = put_user_sal(off, arg3);
11233            if (is_error(ret2)) {
11234                ret = ret2;
11235            }
11236        }
11237        return ret;
11238    }
11239#endif
11240#ifdef TARGET_NR_sendfile64
11241    case TARGET_NR_sendfile64:
11242    {
11243        off_t *offp = NULL;
11244        off_t off;
11245        if (arg3) {
11246            ret = get_user_s64(off, arg3);
11247            if (is_error(ret)) {
11248                return ret;
11249            }
11250            offp = &off;
11251        }
11252        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11253        if (!is_error(ret) && arg3) {
11254            abi_long ret2 = put_user_s64(off, arg3);
11255            if (is_error(ret2)) {
11256                ret = ret2;
11257            }
11258        }
11259        return ret;
11260    }
11261#endif
11262#endif
11263#ifdef TARGET_NR_vfork
11264    case TARGET_NR_vfork:
11265        return get_errno(do_fork(cpu_env,
11266                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11267                         0, 0, 0, 0));
11268#endif
11269#ifdef TARGET_NR_ugetrlimit
11270    case TARGET_NR_ugetrlimit:
11271    {
11272        struct rlimit rlim;
11273        int resource = target_to_host_resource(arg1);
11274        ret = get_errno(getrlimit(resource, &rlim));
11275        if (!is_error(ret)) {
11276            struct target_rlimit *target_rlim;
11277            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11278                return -TARGET_EFAULT;
11279            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11280            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11281            unlock_user_struct(target_rlim, arg2, 1);
11282        }
11283        return ret;
11284    }
11285#endif
11286#ifdef TARGET_NR_truncate64
11287    case TARGET_NR_truncate64:
11288        if (!(p = lock_user_string(arg1)))
11289            return -TARGET_EFAULT;
11290        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11291        unlock_user(p, arg1, 0);
11292        return ret;
11293#endif
11294#ifdef TARGET_NR_ftruncate64
11295    case TARGET_NR_ftruncate64:
11296        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11297#endif
11298#ifdef TARGET_NR_stat64
11299    case TARGET_NR_stat64:
11300        if (!(p = lock_user_string(arg1))) {
11301            return -TARGET_EFAULT;
11302        }
11303        ret = get_errno(stat(path(p), &st));
11304        unlock_user(p, arg1, 0);
11305        if (!is_error(ret))
11306            ret = host_to_target_stat64(cpu_env, arg2, &st);
11307        return ret;
11308#endif
11309#ifdef TARGET_NR_lstat64
11310    case TARGET_NR_lstat64:
11311        if (!(p = lock_user_string(arg1))) {
11312            return -TARGET_EFAULT;
11313        }
11314        ret = get_errno(lstat(path(p), &st));
11315        unlock_user(p, arg1, 0);
11316        if (!is_error(ret))
11317            ret = host_to_target_stat64(cpu_env, arg2, &st);
11318        return ret;
11319#endif
11320#ifdef TARGET_NR_fstat64
11321    case TARGET_NR_fstat64:
11322        ret = get_errno(fstat(arg1, &st));
11323        if (!is_error(ret))
11324            ret = host_to_target_stat64(cpu_env, arg2, &st);
11325        return ret;
11326#endif
11327#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11328#ifdef TARGET_NR_fstatat64
11329    case TARGET_NR_fstatat64:
11330#endif
11331#ifdef TARGET_NR_newfstatat
11332    case TARGET_NR_newfstatat:
11333#endif
11334        if (!(p = lock_user_string(arg2))) {
11335            return -TARGET_EFAULT;
11336        }
11337        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11338        unlock_user(p, arg2, 0);
11339        if (!is_error(ret))
11340            ret = host_to_target_stat64(cpu_env, arg3, &st);
11341        return ret;
11342#endif
11343#if defined(TARGET_NR_statx)
11344    case TARGET_NR_statx:
11345        {
11346            struct target_statx *target_stx;
11347            int dirfd = arg1;
11348            int flags = arg3;
11349
11350            p = lock_user_string(arg2);
11351            if (p == NULL) {
11352                return -TARGET_EFAULT;
11353            }
11354#if defined(__NR_statx)
11355            {
11356                /*
11357                 * It is assumed that struct statx is architecture independent.
11358                 */
11359                struct target_statx host_stx;
11360                int mask = arg4;
11361
11362                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11363                if (!is_error(ret)) {
11364                    if (host_to_target_statx(&host_stx, arg5) != 0) {
11365                        unlock_user(p, arg2, 0);
11366                        return -TARGET_EFAULT;
11367                    }
11368                }
11369
11370                if (ret != -TARGET_ENOSYS) {
11371                    unlock_user(p, arg2, 0);
11372                    return ret;
11373                }
11374            }
11375#endif
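            /*
             * Fallback for hosts without a statx syscall: emulate it with
             * fstatat() and fill in only the fields a struct stat can
             * provide, leaving everything else zeroed by the memset below.
             */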
11376            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11377            unlock_user(p, arg2, 0);
11378
11379            if (!is_error(ret)) {
11380                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11381                    return -TARGET_EFAULT;
11382                }
11383                memset(target_stx, 0, sizeof(*target_stx));
11384                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11385                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11386                __put_user(st.st_ino, &target_stx->stx_ino);
11387                __put_user(st.st_mode, &target_stx->stx_mode);
11388                __put_user(st.st_uid, &target_stx->stx_uid);
11389                __put_user(st.st_gid, &target_stx->stx_gid);
11390                __put_user(st.st_nlink, &target_stx->stx_nlink);
11391                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11392                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11393                __put_user(st.st_size, &target_stx->stx_size);
11394                __put_user(st.st_blksize, &target_stx->stx_blksize);
11395                __put_user(st.st_blocks, &target_stx->stx_blocks);
11396                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11397                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11398                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11399                unlock_user_struct(target_stx, arg5, 1);
11400            }
11401        }
11402        return ret;
11403#endif
11404#ifdef TARGET_NR_lchown
11405    case TARGET_NR_lchown:
11406        if (!(p = lock_user_string(arg1)))
11407            return -TARGET_EFAULT;
11408        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11409        unlock_user(p, arg1, 0);
11410        return ret;
11411#endif
11412#ifdef TARGET_NR_getuid
11413    case TARGET_NR_getuid:
11414        return get_errno(high2lowuid(getuid()));
11415#endif
11416#ifdef TARGET_NR_getgid
11417    case TARGET_NR_getgid:
11418        return get_errno(high2lowgid(getgid()));
11419#endif
11420#ifdef TARGET_NR_geteuid
11421    case TARGET_NR_geteuid:
11422        return get_errno(high2lowuid(geteuid()));
11423#endif
11424#ifdef TARGET_NR_getegid
11425    case TARGET_NR_getegid:
11426        return get_errno(high2lowgid(getegid()));
11427#endif
11428    case TARGET_NR_setreuid:
11429        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11430    case TARGET_NR_setregid:
11431        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11432    case TARGET_NR_getgroups:
11433        {
11434            int gidsetsize = arg1;
11435            target_id *target_grouplist;
11436            gid_t *grouplist;
11437            int i;
11438
11439            grouplist = alloca(gidsetsize * sizeof(gid_t));
11440            ret = get_errno(getgroups(gidsetsize, grouplist));
11441            if (gidsetsize == 0)
11442                return ret;
11443            if (!is_error(ret)) {
11444                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11445                if (!target_grouplist)
11446                    return -TARGET_EFAULT;
11447                for (i = 0; i < ret; i++)
11448                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11449                unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11450            }
11451        }
11452        return ret;
11453    case TARGET_NR_setgroups:
11454        {
11455            int gidsetsize = arg1;
11456            target_id *target_grouplist;
11457            gid_t *grouplist = NULL;
11458            int i;
11459            if (gidsetsize) {
11460                grouplist = alloca(gidsetsize * sizeof(gid_t));
11461                target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11462                if (!target_grouplist) {
11463                    return -TARGET_EFAULT;
11464                }
11465                for (i = 0; i < gidsetsize; i++) {
11466                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11467                }
11468                unlock_user(target_grouplist, arg2, 0);
11469            }
11470            return get_errno(setgroups(gidsetsize, grouplist));
11471        }
11472    case TARGET_NR_fchown:
11473        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11474#if defined(TARGET_NR_fchownat)
11475    case TARGET_NR_fchownat:
11476        if (!(p = lock_user_string(arg2)))
11477            return -TARGET_EFAULT;
11478        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11479                                 low2highgid(arg4), arg5));
11480        unlock_user(p, arg2, 0);
11481        return ret;
11482#endif
11483#ifdef TARGET_NR_setresuid
11484    case TARGET_NR_setresuid:
11485        return get_errno(sys_setresuid(low2highuid(arg1),
11486                                       low2highuid(arg2),
11487                                       low2highuid(arg3)));
11488#endif
11489#ifdef TARGET_NR_getresuid
11490    case TARGET_NR_getresuid:
11491        {
11492            uid_t ruid, euid, suid;
11493            ret = get_errno(getresuid(&ruid, &euid, &suid));
11494            if (!is_error(ret)) {
11495                if (put_user_id(high2lowuid(ruid), arg1)
11496                    || put_user_id(high2lowuid(euid), arg2)
11497                    || put_user_id(high2lowuid(suid), arg3))
11498                    return -TARGET_EFAULT;
11499            }
11500        }
11501        return ret;
11502#endif
11503#ifdef TARGET_NR_setresgid
11504    case TARGET_NR_setresgid:
11505        return get_errno(sys_setresgid(low2highgid(arg1),
11506                                       low2highgid(arg2),
11507                                       low2highgid(arg3)));
11508#endif
11509#ifdef TARGET_NR_getresgid
11510    case TARGET_NR_getresgid:
11511        {
11512            gid_t rgid, egid, sgid;
11513            ret = get_errno(getresgid(&rgid, &egid, &sgid));
11514            if (!is_error(ret)) {
11515                if (put_user_id(high2lowgid(rgid), arg1)
11516                    || put_user_id(high2lowgid(egid), arg2)
11517                    || put_user_id(high2lowgid(sgid), arg3))
11518                    return -TARGET_EFAULT;
11519            }
11520        }
11521        return ret;
11522#endif
11523#ifdef TARGET_NR_chown
11524    case TARGET_NR_chown:
11525        if (!(p = lock_user_string(arg1)))
11526            return -TARGET_EFAULT;
11527        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11528        unlock_user(p, arg1, 0);
11529        return ret;
11530#endif
11531    case TARGET_NR_setuid:
11532        return get_errno(sys_setuid(low2highuid(arg1)));
11533    case TARGET_NR_setgid:
11534        return get_errno(sys_setgid(low2highgid(arg1)));
11535    case TARGET_NR_setfsuid:
11536        return get_errno(setfsuid(arg1));
11537    case TARGET_NR_setfsgid:
11538        return get_errno(setfsgid(arg1));
11539
11540#ifdef TARGET_NR_lchown32
11541    case TARGET_NR_lchown32:
11542        if (!(p = lock_user_string(arg1)))
11543            return -TARGET_EFAULT;
11544        ret = get_errno(lchown(p, arg2, arg3));
11545        unlock_user(p, arg1, 0);
11546        return ret;
11547#endif
11548#ifdef TARGET_NR_getuid32
11549    case TARGET_NR_getuid32:
11550        return get_errno(getuid());
11551#endif
11552
11553#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11554   /* Alpha specific */
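    /*
     * getxuid returns the real uid in v0 and the effective uid in the a4
     * register, hence the extra write to cpu_env->ir[IR_A4] below.
     */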
11555    case TARGET_NR_getxuid:
11556         {
11557            uid_t euid;
11558            euid = geteuid();
11559            cpu_env->ir[IR_A4] = euid;
11560         }
11561        return get_errno(getuid());
11562#endif
11563#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11564   /* Alpha specific */
11565    case TARGET_NR_getxgid:
11566         {
11567            gid_t egid;
11568            egid = getegid();
11569            cpu_env->ir[IR_A4] = egid;
11570         }
11571        return get_errno(getgid());
11572#endif
11573#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11574    /* Alpha specific */
11575    case TARGET_NR_osf_getsysinfo:
11576        ret = -TARGET_EOPNOTSUPP;
11577        switch (arg1) {
11578          case TARGET_GSI_IEEE_FP_CONTROL:
11579            {
11580                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11581                uint64_t swcr = cpu_env->swcr;
11582
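                /*
                 * Refresh the status field of the emulated software
                 * completion word from the live FPCR exception bits
                 * before reporting it to the guest.
                 */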
11583                swcr &= ~SWCR_STATUS_MASK;
11584                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11585
11586                if (put_user_u64(swcr, arg2))
11587                    return -TARGET_EFAULT;
11588                ret = 0;
11589            }
11590            break;
11591
11592          /* case GSI_IEEE_STATE_AT_SIGNAL:
11593             -- Not implemented in linux kernel.
11594             case GSI_UACPROC:
11595             -- Retrieves current unaligned access state; not much used.
11596             case GSI_PROC_TYPE:
11597             -- Retrieves implver information; surely not used.
11598             case GSI_GET_HWRPB:
11599             -- Grabs a copy of the HWRPB; surely not used.
11600          */
11601        }
11602        return ret;
11603#endif
11604#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11605    /* Alpha specific */
11606    case TARGET_NR_osf_setsysinfo:
11607        ret = -TARGET_EOPNOTSUPP;
11608        switch (arg1) {
11609          case TARGET_SSI_IEEE_FP_CONTROL:
11610            {
11611                uint64_t swcr, fpcr;
11612
11613                if (get_user_u64(swcr, arg2)) {
11614                    return -TARGET_EFAULT;
11615                }
11616
11617                /*
11618                 * The kernel calls swcr_update_status to update the
11619                 * status bits from the fpcr at every point that it
11620                 * could be queried.  Therefore, we store the status
11621                 * bits only in FPCR.
11622                 */
11623                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11624
11625                fpcr = cpu_alpha_load_fpcr(cpu_env);
11626                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11627                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11628                cpu_alpha_store_fpcr(cpu_env, fpcr);
11629                ret = 0;
11630            }
11631            break;
11632
11633          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11634            {
11635                uint64_t exc, fpcr, fex;
11636
11637                if (get_user_u64(exc, arg2)) {
11638                    return -TARGET_EFAULT;
11639                }
11640                exc &= SWCR_STATUS_MASK;
11641                fpcr = cpu_alpha_load_fpcr(cpu_env);
11642
11643                /* Old exceptions are not signaled.  */
11644                fex = alpha_ieee_fpcr_to_swcr(fpcr);
11645                fex = exc & ~fex;
11646                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11647                fex &= (cpu_env)->swcr;
11648
11649                /* Update the hardware fpcr.  */
11650                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11651                cpu_alpha_store_fpcr(cpu_env, fpcr);
11652
11653                if (fex) {
11654                    int si_code = TARGET_FPE_FLTUNK;
11655                    target_siginfo_t info;
11656
11657                    if (fex & SWCR_TRAP_ENABLE_DNO) {
11658                        si_code = TARGET_FPE_FLTUND;
11659                    }
11660                    if (fex & SWCR_TRAP_ENABLE_INE) {
11661                        si_code = TARGET_FPE_FLTRES;
11662                    }
11663                    if (fex & SWCR_TRAP_ENABLE_UNF) {
11664                        si_code = TARGET_FPE_FLTUND;
11665                    }
11666                    if (fex & SWCR_TRAP_ENABLE_OVF) {
11667                        si_code = TARGET_FPE_FLTOVF;
11668                    }
11669                    if (fex & SWCR_TRAP_ENABLE_DZE) {
11670                        si_code = TARGET_FPE_FLTDIV;
11671                    }
11672                    if (fex & SWCR_TRAP_ENABLE_INV) {
11673                        si_code = TARGET_FPE_FLTINV;
11674                    }
11675
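                    /*
                     * The tests above overwrite si_code in turn, so when
                     * several exception bits are raised at once the last
                     * matching one (invalid operation) determines the
                     * reported code.
                     */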
11676                    info.si_signo = SIGFPE;
11677                    info.si_errno = 0;
11678                    info.si_code = si_code;
11679                    info._sifields._sigfault._addr = (cpu_env)->pc;
11680                    queue_signal(cpu_env, info.si_signo,
11681                                 QEMU_SI_FAULT, &info);
11682                }
11683                ret = 0;
11684            }
11685            break;
11686
11687          /* case SSI_NVPAIRS:
11688             -- Used with SSIN_UACPROC to enable unaligned accesses.
11689             case SSI_IEEE_STATE_AT_SIGNAL:
11690             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11691             -- Not implemented in linux kernel
11692          */
11693        }
11694        return ret;
11695#endif
11696#ifdef TARGET_NR_osf_sigprocmask
11697    /* Alpha specific.  */
11698    case TARGET_NR_osf_sigprocmask:
11699        {
11700            abi_ulong mask;
11701            int how;
11702            sigset_t set, oldset;
11703
11704            switch (arg1) {
11705            case TARGET_SIG_BLOCK:
11706                how = SIG_BLOCK;
11707                break;
11708            case TARGET_SIG_UNBLOCK:
11709                how = SIG_UNBLOCK;
11710                break;
11711            case TARGET_SIG_SETMASK:
11712                how = SIG_SETMASK;
11713                break;
11714            default:
11715                return -TARGET_EINVAL;
11716            }
11717            mask = arg2;
11718            target_to_host_old_sigset(&set, &mask);
11719            ret = do_sigprocmask(how, &set, &oldset);
11720            if (!ret) {
11721                host_to_target_old_sigset(&mask, &oldset);
11722                ret = mask;
11723            }
11724        }
11725        return ret;
11726#endif
11727
11728#ifdef TARGET_NR_getgid32
11729    case TARGET_NR_getgid32:
11730        return get_errno(getgid());
11731#endif
11732#ifdef TARGET_NR_geteuid32
11733    case TARGET_NR_geteuid32:
11734        return get_errno(geteuid());
11735#endif
11736#ifdef TARGET_NR_getegid32
11737    case TARGET_NR_getegid32:
11738        return get_errno(getegid());
11739#endif
11740#ifdef TARGET_NR_setreuid32
11741    case TARGET_NR_setreuid32:
11742        return get_errno(setreuid(arg1, arg2));
11743#endif
11744#ifdef TARGET_NR_setregid32
11745    case TARGET_NR_setregid32:
11746        return get_errno(setregid(arg1, arg2));
11747#endif
11748#ifdef TARGET_NR_getgroups32
11749    case TARGET_NR_getgroups32:
11750        {
11751            int gidsetsize = arg1;
11752            uint32_t *target_grouplist;
11753            gid_t *grouplist;
11754            int i;
11755
11756            grouplist = alloca(gidsetsize * sizeof(gid_t));
11757            ret = get_errno(getgroups(gidsetsize, grouplist));
11758            if (gidsetsize == 0)
11759                return ret;
11760            if (!is_error(ret)) {
11761                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11762                if (!target_grouplist) {
11763                    return -TARGET_EFAULT;
11764                }
11765                for (i = 0; i < ret; i++)
11766                    target_grouplist[i] = tswap32(grouplist[i]);
11767                unlock_user(target_grouplist, arg2, gidsetsize * 4);
11768            }
11769        }
11770        return ret;
11771#endif
11772#ifdef TARGET_NR_setgroups32
11773    case TARGET_NR_setgroups32:
11774        {
11775            int gidsetsize = arg1;
11776            uint32_t *target_grouplist;
11777            gid_t *grouplist;
11778            int i;
11779
11780            grouplist = alloca(gidsetsize * sizeof(gid_t));
11781            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11782            if (!target_grouplist) {
11783                return -TARGET_EFAULT;
11784            }
11785            for (i = 0; i < gidsetsize; i++)
11786                grouplist[i] = tswap32(target_grouplist[i]);
11787            unlock_user(target_grouplist, arg2, 0);
11788            return get_errno(setgroups(gidsetsize, grouplist));
11789        }
11790#endif
11791#ifdef TARGET_NR_fchown32
11792    case TARGET_NR_fchown32:
11793        return get_errno(fchown(arg1, arg2, arg3));
11794#endif
11795#ifdef TARGET_NR_setresuid32
11796    case TARGET_NR_setresuid32:
11797        return get_errno(sys_setresuid(arg1, arg2, arg3));
11798#endif
11799#ifdef TARGET_NR_getresuid32
11800    case TARGET_NR_getresuid32:
11801        {
11802            uid_t ruid, euid, suid;
11803            ret = get_errno(getresuid(&ruid, &euid, &suid));
11804            if (!is_error(ret)) {
11805                if (put_user_u32(ruid, arg1)
11806                    || put_user_u32(euid, arg2)
11807                    || put_user_u32(suid, arg3))
11808                    return -TARGET_EFAULT;
11809            }
11810        }
11811        return ret;
11812#endif
11813#ifdef TARGET_NR_setresgid32
11814    case TARGET_NR_setresgid32:
11815        return get_errno(sys_setresgid(arg1, arg2, arg3));
11816#endif
11817#ifdef TARGET_NR_getresgid32
11818    case TARGET_NR_getresgid32:
11819        {
11820            gid_t rgid, egid, sgid;
11821            ret = get_errno(getresgid(&rgid, &egid, &sgid));
11822            if (!is_error(ret)) {
11823                if (put_user_u32(rgid, arg1)
11824                    || put_user_u32(egid, arg2)
11825                    || put_user_u32(sgid, arg3))
11826                    return -TARGET_EFAULT;
11827            }
11828        }
11829        return ret;
11830#endif
11831#ifdef TARGET_NR_chown32
11832    case TARGET_NR_chown32:
11833        if (!(p = lock_user_string(arg1)))
11834            return -TARGET_EFAULT;
11835        ret = get_errno(chown(p, arg2, arg3));
11836        unlock_user(p, arg1, 0);
11837        return ret;
11838#endif
11839#ifdef TARGET_NR_setuid32
11840    case TARGET_NR_setuid32:
11841        return get_errno(sys_setuid(arg1));
11842#endif
11843#ifdef TARGET_NR_setgid32
11844    case TARGET_NR_setgid32:
11845        return get_errno(sys_setgid(arg1));
11846#endif
11847#ifdef TARGET_NR_setfsuid32
11848    case TARGET_NR_setfsuid32:
11849        return get_errno(setfsuid(arg1));
11850#endif
11851#ifdef TARGET_NR_setfsgid32
11852    case TARGET_NR_setfsgid32:
11853        return get_errno(setfsgid(arg1));
11854#endif
11855#ifdef TARGET_NR_mincore
11856    case TARGET_NR_mincore:
11857        {
11858            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11859            if (!a) {
11860                return -TARGET_ENOMEM;
11861            }
11862            p = lock_user_string(arg3);
11863            if (!p) {
11864                ret = -TARGET_EFAULT;
11865            } else {
11866                ret = get_errno(mincore(a, arg2, p));
11867                unlock_user(p, arg3, ret);
11868            }
11869            unlock_user(a, arg1, 0);
11870        }
11871        return ret;
11872#endif
11873#ifdef TARGET_NR_arm_fadvise64_64
11874    case TARGET_NR_arm_fadvise64_64:
11875        /* arm_fadvise64_64 looks like fadvise64_64 but
11876         * with different argument order: fd, advice, offset, len
11877         * rather than the usual fd, offset, len, advice.
11878         * Note that offset and len are both 64-bit so appear as
11879         * pairs of 32-bit registers.
11880         */
11881        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11882                            target_offset64(arg5, arg6), arg2);
11883        return -host_to_target_errno(ret);
11884#endif
11885
11886#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11887
11888#ifdef TARGET_NR_fadvise64_64
11889    case TARGET_NR_fadvise64_64:
11890#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11891        /* 6 args: fd, advice, offset (high, low), len (high, low) */
11892        ret = arg2;
11893        arg2 = arg3;
11894        arg3 = arg4;
11895        arg4 = arg5;
11896        arg5 = arg6;
11897        arg6 = ret;
11898#else
11899        /* 6 args: fd, offset (high, low), len (high, low), advice */
11900        if (regpairs_aligned(cpu_env, num)) {
11901            /* offset is in (3,4), len in (5,6) and advice in 7 */
11902            arg2 = arg3;
11903            arg3 = arg4;
11904            arg4 = arg5;
11905            arg5 = arg6;
11906            arg6 = arg7;
11907        }
11908#endif
11909        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11910                            target_offset64(arg4, arg5), arg6);
11911        return -host_to_target_errno(ret);
11912#endif
11913
11914#ifdef TARGET_NR_fadvise64
11915    case TARGET_NR_fadvise64:
11916        /* 5 args: fd, offset (high, low), len, advice */
11917        if (regpairs_aligned(cpu_env, num)) {
11918            /* offset is in (3,4), len in 5 and advice in 6 */
11919            arg2 = arg3;
11920            arg3 = arg4;
11921            arg4 = arg5;
11922            arg5 = arg6;
11923        }
11924        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11925        return -host_to_target_errno(ret);
11926#endif
11927
11928#else /* not a 32-bit ABI */
11929#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11930#ifdef TARGET_NR_fadvise64_64
11931    case TARGET_NR_fadvise64_64:
11932#endif
11933#ifdef TARGET_NR_fadvise64
11934    case TARGET_NR_fadvise64:
11935#endif
11936#ifdef TARGET_S390X
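        /*
         * s390 numbers POSIX_FADV_DONTNEED/NOREUSE as 6/7 rather than
         * the generic 4/5, so remap those and turn the generic values
         * into deliberately invalid advice.
         */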
11937        switch (arg4) {
11938        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11939        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11940        case 6: arg4 = POSIX_FADV_DONTNEED; break;
11941        case 7: arg4 = POSIX_FADV_NOREUSE; break;
11942        default: break;
11943        }
11944#endif
11945        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11946#endif
11947#endif /* end of 64-bit ABI fadvise handling */
11948
11949#ifdef TARGET_NR_madvise
11950    case TARGET_NR_madvise:
11951        return target_madvise(arg1, arg2, arg3);
11952#endif
11953#ifdef TARGET_NR_fcntl64
11954    case TARGET_NR_fcntl64:
11955    {
11956        int cmd;
11957        struct flock64 fl;
11958        from_flock64_fn *copyfrom = copy_from_user_flock64;
11959        to_flock64_fn *copyto = copy_to_user_flock64;
11960
11961#ifdef TARGET_ARM
11962        if (!cpu_env->eabi) {
11963            copyfrom = copy_from_user_oabi_flock64;
11964            copyto = copy_to_user_oabi_flock64;
11965        }
11966#endif
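        /*
         * The ARM OABI struct flock64 lacks the padding that EABI inserts
         * to 8-byte align its 64-bit members, so old-ABI guests need a
         * different pair of copy helpers.
         */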
11967
11968        cmd = target_to_host_fcntl_cmd(arg2);
11969        if (cmd == -TARGET_EINVAL) {
11970            return cmd;
11971        }
11972
11973        switch (arg2) {
11974        case TARGET_F_GETLK64:
11975            ret = copyfrom(&fl, arg3);
11976            if (ret) {
11977                break;
11978            }
11979            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11980            if (ret == 0) {
11981                ret = copyto(arg3, &fl);
11982            }
11983            break;
11984
11985        case TARGET_F_SETLK64:
11986        case TARGET_F_SETLKW64:
11987            ret = copyfrom(&fl, arg3);
11988            if (ret) {
11989                break;
11990            }
11991            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11992            break;
11993        default:
11994            ret = do_fcntl(arg1, arg2, arg3);
11995            break;
11996        }
11997        return ret;
11998    }
11999#endif
12000#ifdef TARGET_NR_cacheflush
12001    case TARGET_NR_cacheflush:
12002        /* self-modifying code is handled automatically, so nothing needed */
12003        return 0;
12004#endif
12005#ifdef TARGET_NR_getpagesize
12006    case TARGET_NR_getpagesize:
12007        return TARGET_PAGE_SIZE;
12008#endif
12009    case TARGET_NR_gettid:
12010        return get_errno(sys_gettid());
12011#ifdef TARGET_NR_readahead
12012    case TARGET_NR_readahead:
12013#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12014        if (regpairs_aligned(cpu_env, num)) {
12015            arg2 = arg3;
12016            arg3 = arg4;
12017            arg4 = arg5;
12018        }
12019        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12020#else
12021        ret = get_errno(readahead(arg1, arg2, arg3));
12022#endif
12023        return ret;
12024#endif
12025#ifdef CONFIG_ATTR
12026#ifdef TARGET_NR_setxattr
12027    case TARGET_NR_listxattr:
12028    case TARGET_NR_llistxattr:
12029    {
12030        void *p, *b = 0;
12031        if (arg2) {
12032            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12033            if (!b) {
12034                return -TARGET_EFAULT;
12035            }
12036        }
12037        p = lock_user_string(arg1);
12038        if (p) {
12039            if (num == TARGET_NR_listxattr) {
12040                ret = get_errno(listxattr(p, b, arg3));
12041            } else {
12042                ret = get_errno(llistxattr(p, b, arg3));
12043            }
12044        } else {
12045            ret = -TARGET_EFAULT;
12046        }
12047        unlock_user(p, arg1, 0);
12048        unlock_user(b, arg2, arg3);
12049        return ret;
12050    }
12051    case TARGET_NR_flistxattr:
12052    {
12053        void *b = 0;
12054        if (arg2) {
12055            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12056            if (!b) {
12057                return -TARGET_EFAULT;
12058            }
12059        }
12060        ret = get_errno(flistxattr(arg1, b, arg3));
12061        unlock_user(b, arg2, arg3);
12062        return ret;
12063    }
12064    case TARGET_NR_setxattr:
12065    case TARGET_NR_lsetxattr:
12066        {
12067            void *p, *n, *v = 0;
12068            if (arg3) {
12069                v = lock_user(VERIFY_READ, arg3, arg4, 1);
12070                if (!v) {
12071                    return -TARGET_EFAULT;
12072                }
12073            }
12074            p = lock_user_string(arg1);
12075            n = lock_user_string(arg2);
12076            if (p && n) {
12077                if (num == TARGET_NR_setxattr) {
12078                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
12079                } else {
12080                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12081                }
12082            } else {
12083                ret = -TARGET_EFAULT;
12084            }
12085            unlock_user(p, arg1, 0);
12086            unlock_user(n, arg2, 0);
12087            unlock_user(v, arg3, 0);
12088        }
12089        return ret;
12090    case TARGET_NR_fsetxattr:
12091        {
12092            void *n, *v = 0;
12093            if (arg3) {
12094                v = lock_user(VERIFY_READ, arg3, arg4, 1);
12095                if (!v) {
12096                    return -TARGET_EFAULT;
12097                }
12098            }
12099            n = lock_user_string(arg2);
12100            if (n) {
12101                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12102            } else {
12103                ret = -TARGET_EFAULT;
12104            }
12105            unlock_user(n, arg2, 0);
12106            unlock_user(v, arg3, 0);
12107        }
12108        return ret;
12109    case TARGET_NR_getxattr:
12110    case TARGET_NR_lgetxattr:
12111        {
12112            void *p, *n, *v = 0;
12113            if (arg3) {
12114                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12115                if (!v) {
12116                    return -TARGET_EFAULT;
12117                }
12118            }
12119            p = lock_user_string(arg1);
12120            n = lock_user_string(arg2);
12121            if (p && n) {
12122                if (num == TARGET_NR_getxattr) {
12123                    ret = get_errno(getxattr(p, n, v, arg4));
12124                } else {
12125                    ret = get_errno(lgetxattr(p, n, v, arg4));
12126                }
12127            } else {
12128                ret = -TARGET_EFAULT;
12129            }
12130            unlock_user(p, arg1, 0);
12131            unlock_user(n, arg2, 0);
12132            unlock_user(v, arg3, arg4);
12133        }
12134        return ret;
12135    case TARGET_NR_fgetxattr:
12136        {
12137            void *n, *v = 0;
12138            if (arg3) {
12139                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12140                if (!v) {
12141                    return -TARGET_EFAULT;
12142                }
12143            }
12144            n = lock_user_string(arg2);
12145            if (n) {
12146                ret = get_errno(fgetxattr(arg1, n, v, arg4));
12147            } else {
12148                ret = -TARGET_EFAULT;
12149            }
12150            unlock_user(n, arg2, 0);
12151            unlock_user(v, arg3, arg4);
12152        }
12153        return ret;
12154    case TARGET_NR_removexattr:
12155    case TARGET_NR_lremovexattr:
12156        {
12157            void *p, *n;
12158            p = lock_user_string(arg1);
12159            n = lock_user_string(arg2);
12160            if (p && n) {
12161                if (num == TARGET_NR_removexattr) {
12162                    ret = get_errno(removexattr(p, n));
12163                } else {
12164                    ret = get_errno(lremovexattr(p, n));
12165                }
12166            } else {
12167                ret = -TARGET_EFAULT;
12168            }
12169            unlock_user(p, arg1, 0);
12170            unlock_user(n, arg2, 0);
12171        }
12172        return ret;
12173    case TARGET_NR_fremovexattr:
12174        {
12175            void *n;
12176            n = lock_user_string(arg2);
12177            if (n) {
12178                ret = get_errno(fremovexattr(arg1, n));
12179            } else {
12180                ret = -TARGET_EFAULT;
12181            }
12182            unlock_user(n, arg2, 0);
12183        }
12184        return ret;
12185#endif
12186#endif /* CONFIG_ATTR */
12187#ifdef TARGET_NR_set_thread_area
12188    case TARGET_NR_set_thread_area:
12189#if defined(TARGET_MIPS)
12190      cpu_env->active_tc.CP0_UserLocal = arg1;
12191      return 0;
12192#elif defined(TARGET_CRIS)
12193      if (arg1 & 0xff)
12194          ret = -TARGET_EINVAL;
12195      else {
12196          cpu_env->pregs[PR_PID] = arg1;
12197          ret = 0;
12198      }
12199      return ret;
12200#elif defined(TARGET_I386) && defined(TARGET_ABI32)
12201      return do_set_thread_area(cpu_env, arg1);
12202#elif defined(TARGET_M68K)
12203      {
12204          TaskState *ts = cpu->opaque;
12205          ts->tp_value = arg1;
12206          return 0;
12207      }
12208#else
12209      return -TARGET_ENOSYS;
12210#endif
12211#endif
12212#ifdef TARGET_NR_get_thread_area
12213    case TARGET_NR_get_thread_area:
12214#if defined(TARGET_I386) && defined(TARGET_ABI32)
12215        return do_get_thread_area(cpu_env, arg1);
12216#elif defined(TARGET_M68K)
12217        {
12218            TaskState *ts = cpu->opaque;
12219            return ts->tp_value;
12220        }
12221#else
12222        return -TARGET_ENOSYS;
12223#endif
12224#endif
12225#ifdef TARGET_NR_getdomainname
12226    case TARGET_NR_getdomainname:
12227        return -TARGET_ENOSYS;
12228#endif
12229
12230#ifdef TARGET_NR_clock_settime
12231    case TARGET_NR_clock_settime:
12232    {
12233        struct timespec ts;
12234
12235        ret = target_to_host_timespec(&ts, arg2);
12236        if (!is_error(ret)) {
12237            ret = get_errno(clock_settime(arg1, &ts));
12238        }
12239        return ret;
12240    }
12241#endif
12242#ifdef TARGET_NR_clock_settime64
12243    case TARGET_NR_clock_settime64:
12244    {
12245        struct timespec ts;
12246
12247        ret = target_to_host_timespec64(&ts, arg2);
12248        if (!is_error(ret)) {
12249            ret = get_errno(clock_settime(arg1, &ts));
12250        }
12251        return ret;
12252    }
12253#endif
12254#ifdef TARGET_NR_clock_gettime
12255    case TARGET_NR_clock_gettime:
12256    {
12257        struct timespec ts;
12258        ret = get_errno(clock_gettime(arg1, &ts));
12259        if (!is_error(ret)) {
12260            ret = host_to_target_timespec(arg2, &ts);
12261        }
12262        return ret;
12263    }
12264#endif
12265#ifdef TARGET_NR_clock_gettime64
12266    case TARGET_NR_clock_gettime64:
12267    {
12268        struct timespec ts;
12269        ret = get_errno(clock_gettime(arg1, &ts));
12270        if (!is_error(ret)) {
12271            ret = host_to_target_timespec64(arg2, &ts);
12272        }
12273        return ret;
12274    }
12275#endif
12276#ifdef TARGET_NR_clock_getres
12277    case TARGET_NR_clock_getres:
12278    {
12279        struct timespec ts;
12280        ret = get_errno(clock_getres(arg1, &ts));
12281        if (!is_error(ret)) {
12282            host_to_target_timespec(arg2, &ts);
12283        }
12284        return ret;
12285    }
12286#endif
12287#ifdef TARGET_NR_clock_getres_time64
12288    case TARGET_NR_clock_getres_time64:
12289    {
12290        struct timespec ts;
12291        ret = get_errno(clock_getres(arg1, &ts));
12292        if (!is_error(ret)) {
12293            host_to_target_timespec64(arg2, &ts);
12294        }
12295        return ret;
12296    }
12297#endif
12298#ifdef TARGET_NR_clock_nanosleep
12299    case TARGET_NR_clock_nanosleep:
12300    {
12301        struct timespec ts;
12302        if (target_to_host_timespec(&ts, arg3)) {
12303            return -TARGET_EFAULT;
12304        }
12305        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12306                                             &ts, arg4 ? &ts : NULL));
12307        /*
12308         * If the call is interrupted by a signal handler it fails with
12309         * -TARGET_EINTR; if arg4 is not NULL and arg2 is not TIMER_ABSTIME,
12310         * the remaining unslept time is then written back to arg4.
12311         */
12312        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12313            host_to_target_timespec(arg4, &ts)) {
12314              return -TARGET_EFAULT;
12315        }
12316
12317        return ret;
12318    }
12319#endif
12320#ifdef TARGET_NR_clock_nanosleep_time64
12321    case TARGET_NR_clock_nanosleep_time64:
12322    {
12323        struct timespec ts;
12324
12325        if (target_to_host_timespec64(&ts, arg3)) {
12326            return -TARGET_EFAULT;
12327        }
12328
12329        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12330                                             &ts, arg4 ? &ts : NULL));
12331
12332        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12333            host_to_target_timespec64(arg4, &ts)) {
12334            return -TARGET_EFAULT;
12335        }
12336        return ret;
12337    }
12338#endif
12339
12340#if defined(TARGET_NR_set_tid_address)
12341    case TARGET_NR_set_tid_address:
12342    {
12343        TaskState *ts = cpu->opaque;
12344        ts->child_tidptr = arg1;
12345        /* do not call the host set_tid_address() syscall; just return the tid */
12346        return get_errno(sys_gettid());
12347    }
12348#endif
12349
12350    case TARGET_NR_tkill:
12351        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12352
12353    case TARGET_NR_tgkill:
12354        return get_errno(safe_tgkill((int)arg1, (int)arg2,
12355                         target_to_host_signal(arg3)));
12356
12357#ifdef TARGET_NR_set_robust_list
12358    case TARGET_NR_set_robust_list:
12359    case TARGET_NR_get_robust_list:
12360        /* The ABI for supporting robust futexes has userspace pass
12361         * the kernel a pointer to a linked list which is updated by
12362         * userspace after the syscall; the list is walked by the kernel
12363         * when the thread exits. Since the linked list in QEMU guest
12364         * memory isn't a valid linked list for the host and we have
12365         * no way to reliably intercept the thread-death event, we can't
12366         * support these. Silently return ENOSYS so that guest userspace
12367         * falls back to a non-robust futex implementation (which should
12368         * be OK except in the corner case of the guest crashing while
12369         * holding a mutex that is shared with another process via
12370         * shared memory).
12371         */
12372        return -TARGET_ENOSYS;
12373#endif
12374
12375#if defined(TARGET_NR_utimensat)
12376    case TARGET_NR_utimensat:
12377        {
12378            struct timespec *tsp, ts[2];
12379            if (!arg3) {
12380                tsp = NULL;
12381            } else {
12382                if (target_to_host_timespec(ts, arg3)) {
12383                    return -TARGET_EFAULT;
12384                }
12385                if (target_to_host_timespec(ts + 1, arg3 +
12386                                            sizeof(struct target_timespec))) {
12387                    return -TARGET_EFAULT;
12388                }
12389                tsp = ts;
12390            }
12391            if (!arg2) {
12392                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12393            } else {
12394                if (!(p = lock_user_string(arg2))) {
12395                    return -TARGET_EFAULT;
12396                }
12397                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12398                unlock_user(p, arg2, 0);
12399            }
12400        }
12401        return ret;
12402#endif
12403#ifdef TARGET_NR_utimensat_time64
12404    case TARGET_NR_utimensat_time64:
12405        {
12406            struct timespec *tsp, ts[2];
12407            if (!arg3) {
12408                tsp = NULL;
12409            } else {
12410                if (target_to_host_timespec64(ts, arg3)) {
12411                    return -TARGET_EFAULT;
12412                }
12413                if (target_to_host_timespec64(ts + 1, arg3 +
12414                                     sizeof(struct target__kernel_timespec))) {
12415                    return -TARGET_EFAULT;
12416                }
12417                tsp = ts;
12418            }
12419            if (!arg2) {
12420                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12421            } else {
12422                p = lock_user_string(arg2);
12423                if (!p) {
12424                    return -TARGET_EFAULT;
12425                }
12426                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12427                unlock_user(p, arg2, 0);
12428            }
12429        }
12430        return ret;
12431#endif
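         /*
          * Both futex flavours are handled by do_futex(); the boolean
          * argument selects whether a timeout is read as a 64-bit timespec.
          */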
12432#ifdef TARGET_NR_futex
12433    case TARGET_NR_futex:
12434        return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12435#endif
12436#ifdef TARGET_NR_futex_time64
12437    case TARGET_NR_futex_time64:
12438        return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12439#endif
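         /*
          * inotify descriptors are registered with the fd translation layer
          * so that events read from them can be converted to the target's
          * struct inotify_event layout.
          */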
12440#ifdef CONFIG_INOTIFY
12441#if defined(TARGET_NR_inotify_init)
12442    case TARGET_NR_inotify_init:
12443        ret = get_errno(inotify_init());
12444        if (ret >= 0) {
12445            fd_trans_register(ret, &target_inotify_trans);
12446        }
12447        return ret;
12448#endif
12449#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12450    case TARGET_NR_inotify_init1:
12451        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12452                                          fcntl_flags_tbl)));
12453        if (ret >= 0) {
12454            fd_trans_register(ret, &target_inotify_trans);
12455        }
12456        return ret;
12457#endif
12458#if defined(TARGET_NR_inotify_add_watch)
12459    case TARGET_NR_inotify_add_watch:
12460        p = lock_user_string(arg2);
             if (!p) {
                 return -TARGET_EFAULT;
             }
12461        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12462        unlock_user(p, arg2, 0);
12463        return ret;
12464#endif
12465#if defined(TARGET_NR_inotify_rm_watch)
12466    case TARGET_NR_inotify_rm_watch:
12467        return get_errno(inotify_rm_watch(arg1, arg2));
12468#endif
12469#endif
12470
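         /*
          * POSIX message queue syscalls are forwarded to the host mq_*
          * interfaces after converting the open flags, mq_attr structures
          * and timespec timeouts between target and host layouts.
          */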
12471#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12472    case TARGET_NR_mq_open:
12473        {
12474            struct mq_attr posix_mq_attr;
12475            struct mq_attr *pposix_mq_attr;
12476            int host_flags;
12477
12478            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12479            pposix_mq_attr = NULL;
12480            if (arg4) {
12481                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12482                    return -TARGET_EFAULT;
12483                }
12484                pposix_mq_attr = &posix_mq_attr;
12485            }
12486            p = lock_user_string(arg1 - 1);
12487            if (!p) {
12488                return -TARGET_EFAULT;
12489            }
12490            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12491            unlock_user(p, arg1, 0);
12492        }
12493        return ret;
12494
12495    case TARGET_NR_mq_unlink:
12496        p = lock_user_string(arg1 - 1);
12497        if (!p) {
12498            return -TARGET_EFAULT;
12499        }
12500        ret = get_errno(mq_unlink(p));
12501        unlock_user(p, arg1, 0);
12502        return ret;
12503
12504#ifdef TARGET_NR_mq_timedsend
12505    case TARGET_NR_mq_timedsend:
12506        {
12507            struct timespec ts;
12508
12509            p = lock_user(VERIFY_READ, arg2, arg3, 1);
                 if (!p) {
                     return -TARGET_EFAULT;
                 }
12510            if (arg5 != 0) {
12511                if (target_to_host_timespec(&ts, arg5)) {
12512                    return -TARGET_EFAULT;
12513                }
12514                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12515                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12516                    return -TARGET_EFAULT;
12517                }
12518            } else {
12519                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12520            }
12521            unlock_user(p, arg2, arg3);
12522        }
12523        return ret;
12524#endif
12525#ifdef TARGET_NR_mq_timedsend_time64
12526    case TARGET_NR_mq_timedsend_time64:
12527        {
12528            struct timespec ts;
12529
12530            p = lock_user(VERIFY_READ, arg2, arg3, 1);
                 if (!p) {
                     return -TARGET_EFAULT;
                 }
12531            if (arg5 != 0) {
12532                if (target_to_host_timespec64(&ts, arg5)) {
12533                    return -TARGET_EFAULT;
12534                }
12535                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12536                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12537                    return -TARGET_EFAULT;
12538                }
12539            } else {
12540                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12541            }
12542            unlock_user(p, arg2, arg3);
12543        }
12544        return ret;
12545#endif
12546
12547#ifdef TARGET_NR_mq_timedreceive
12548    case TARGET_NR_mq_timedreceive:
12549        {
12550            struct timespec ts;
12551            unsigned int prio;
12552
12553            p = lock_user(VERIFY_READ, arg2, arg3, 1);
                 if (!p) {
                     return -TARGET_EFAULT;
                 }
12554            if (arg5 != 0) {
12555                if (target_to_host_timespec(&ts, arg5)) {
12556                    return -TARGET_EFAULT;
12557                }
12558                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12559                                                     &prio, &ts));
12560                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12561                    return -TARGET_EFAULT;
12562                }
12563            } else {
12564                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12565                                                     &prio, NULL));
12566            }
12567            unlock_user(p, arg2, arg3);
12568            if (arg4 != 0) {
12569                put_user_u32(prio, arg4);
                 }
12570        }
12571        return ret;
12572#endif
12573#ifdef TARGET_NR_mq_timedreceive_time64
12574    case TARGET_NR_mq_timedreceive_time64:
12575        {
12576            struct timespec ts;
12577            unsigned int prio;
12578
12579            p = lock_user(VERIFY_READ, arg2, arg3, 1);
                 if (!p) {
                     return -TARGET_EFAULT;
                 }
12580            if (arg5 != 0) {
12581                if (target_to_host_timespec64(&ts, arg5)) {
12582                    return -TARGET_EFAULT;
12583                }
12584                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12585                                                     &prio, &ts));
12586                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12587                    return -TARGET_EFAULT;
12588                }
12589            } else {
12590                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12591                                                     &prio, NULL));
12592            }
12593            unlock_user(p, arg2, arg3);
12594            if (arg4 != 0) {
12595                put_user_u32(prio, arg4);
12596            }
12597        }
12598        return ret;
12599#endif
12600
12601    /* TARGET_NR_mq_notify is not implemented for now. */
12604
12605    case TARGET_NR_mq_getsetattr:
12606        {
12607            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12608            ret = 0;
12609            if (arg2 != 0) {
12610                if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                         return -TARGET_EFAULT;
                     }
12611                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12612                                           &posix_mq_attr_out));
12613            } else if (arg3 != 0) {
12614                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12615            }
12616            if (ret == 0 && arg3 != 0) {
12617                if (copy_to_user_mq_attr(arg3, &posix_mq_attr_out) != 0) {
                         return -TARGET_EFAULT;
                     }
12618            }
12619        }
12620        return ret;
12621#endif
12622
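         /*
          * tee/splice/vmsplice map directly onto the host syscalls; for
          * splice the optional loff_t in/out offsets are copied in from and
          * back out to guest memory around the call.
          */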
12623#ifdef CONFIG_SPLICE
12624#ifdef TARGET_NR_tee
12625    case TARGET_NR_tee:
12626        {
12627            ret = get_errno(tee(arg1, arg2, arg3, arg4));
12628        }
12629        return ret;
12630#endif
12631#ifdef TARGET_NR_splice
12632    case TARGET_NR_splice:
12633        {
12634            loff_t loff_in, loff_out;
12635            loff_t *ploff_in = NULL, *ploff_out = NULL;
12636            if (arg2) {
12637                if (get_user_u64(loff_in, arg2)) {
12638                    return -TARGET_EFAULT;
12639                }
12640                ploff_in = &loff_in;
12641            }
12642            if (arg4) {
12643                if (get_user_u64(loff_out, arg4)) {
12644                    return -TARGET_EFAULT;
12645                }
12646                ploff_out = &loff_out;
12647            }
12648            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12649            if (arg2) {
12650                if (put_user_u64(loff_in, arg2)) {
12651                    return -TARGET_EFAULT;
12652                }
12653            }
12654            if (arg4) {
12655                if (put_user_u64(loff_out, arg4)) {
12656                    return -TARGET_EFAULT;
12657                }
12658            }
12659        }
12660        return ret;
12661#endif
12662#ifdef TARGET_NR_vmsplice
12663    case TARGET_NR_vmsplice:
12664        {
12665            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12666            if (vec != NULL) {
12667                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12668                unlock_iovec(vec, arg2, arg3, 0);
12669            } else {
12670                ret = -host_to_target_errno(errno);
12671            }
12672        }
12673        return ret;
12674#endif
12675#endif /* CONFIG_SPLICE */
12676#ifdef CONFIG_EVENTFD
12677#if defined(TARGET_NR_eventfd)
12678    case TARGET_NR_eventfd:
12679        ret = get_errno(eventfd(arg1, 0));
12680        if (ret >= 0) {
12681            fd_trans_register(ret, &target_eventfd_trans);
12682        }
12683        return ret;
12684#endif
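         /*
          * The eventfd2 flags use the target's O_NONBLOCK/O_CLOEXEC
          * encoding, so those two bits are translated to the host's
          * values by hand.
          */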
12685#if defined(TARGET_NR_eventfd2)
12686    case TARGET_NR_eventfd2:
12687    {
12688        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12689        if (arg2 & TARGET_O_NONBLOCK) {
12690            host_flags |= O_NONBLOCK;
12691        }
12692        if (arg2 & TARGET_O_CLOEXEC) {
12693            host_flags |= O_CLOEXEC;
12694        }
12695        ret = get_errno(eventfd(arg1, host_flags));
12696        if (ret >= 0) {
12697            fd_trans_register(ret, &target_eventfd_trans);
12698        }
12699        return ret;
12700    }
12701#endif
12702#endif /* CONFIG_EVENTFD  */
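         /*
          * On 32-bit ABIs (other than MIPS N32) the 64-bit offset and length
          * arrive split across two registers each and are reassembled with
          * target_offset64().
          */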
12703#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12704    case TARGET_NR_fallocate:
12705#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12706        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12707                                  target_offset64(arg5, arg6)));
12708#else
12709        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12710#endif
12711        return ret;
12712#endif
12713#if defined(CONFIG_SYNC_FILE_RANGE)
12714#if defined(TARGET_NR_sync_file_range)
12715    case TARGET_NR_sync_file_range:
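             /*
              * On 32-bit ABIs the 64-bit offset and nbytes arguments are
              * passed as register pairs; MIPS additionally aligns them to
              * even register numbers, which is why its variant reads them
              * starting at arg3 and takes the flags from arg7.
              */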
12716#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12717#if defined(TARGET_MIPS)
12718        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12719                                        target_offset64(arg5, arg6), arg7));
12720#else
12721        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12722                                        target_offset64(arg4, arg5), arg6));
12723#endif /* !TARGET_MIPS */
12724#else
12725        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12726#endif
12727        return ret;
12728#endif
12729#if defined(TARGET_NR_sync_file_range2) || \
12730    defined(TARGET_NR_arm_sync_file_range)
12731#if defined(TARGET_NR_sync_file_range2)
12732    case TARGET_NR_sync_file_range2:
12733#endif
12734#if defined(TARGET_NR_arm_sync_file_range)
12735    case TARGET_NR_arm_sync_file_range:
12736#endif
12737        /* This is like sync_file_range but the arguments are reordered */
12738#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12739        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12740                                        target_offset64(arg5, arg6), arg2));
12741#else
12742        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12743#endif
12744        return ret;
12745#endif
12746#endif
12747#if defined(TARGET_NR_signalfd4)
12748    case TARGET_NR_signalfd4:
12749        return do_signalfd4(arg1, arg2, arg4);
12750#endif
12751#if defined(TARGET_NR_signalfd)
12752    case TARGET_NR_signalfd:
12753        return do_signalfd4(arg1, arg2, 0);
12754#endif
12755#if defined(CONFIG_EPOLL)
12756#if defined(TARGET_NR_epoll_create)
12757    case TARGET_NR_epoll_create:
12758        return get_errno(epoll_create(arg1));
12759#endif
12760#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12761    case TARGET_NR_epoll_create1:
12762        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12763#endif
12764#if defined(TARGET_NR_epoll_ctl)
12765    case TARGET_NR_epoll_ctl:
12766    {
12767        struct epoll_event ep;
12768        struct epoll_event *epp = 0;
12769        if (arg4) {
12770            if (arg2 != EPOLL_CTL_DEL) {
12771                struct target_epoll_event *target_ep;
12772                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12773                    return -TARGET_EFAULT;
12774                }
12775                ep.events = tswap32(target_ep->events);
12776                /*
12777                 * The epoll_data_t union is just opaque data to the kernel,
12778                 * so we transfer all 64 bits across and need not worry what
12779                 * actual data type it is.
12780                 */
12781                ep.data.u64 = tswap64(target_ep->data.u64);
12782                unlock_user_struct(target_ep, arg4, 0);
12783            }
12784            /*
12785             * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12786             * non-null pointer, even though this argument is ignored.
12787             */
12789            epp = &ep;
12790        }
12791        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12792    }
12793#endif
12794
12795#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12796#if defined(TARGET_NR_epoll_wait)
12797    case TARGET_NR_epoll_wait:
12798#endif
12799#if defined(TARGET_NR_epoll_pwait)
12800    case TARGET_NR_epoll_pwait:
12801#endif
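     /*
      * epoll_wait and epoll_pwait share one implementation: events are
      * collected into a host-sized buffer and then converted to the
      * target's epoll_event layout before being copied back.
      */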
12802    {
12803        struct target_epoll_event *target_ep;
12804        struct epoll_event *ep;
12805        int epfd = arg1;
12806        int maxevents = arg3;
12807        int timeout = arg4;
12808
12809        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12810            return -TARGET_EINVAL;
12811        }
12812
12813        target_ep = lock_user(VERIFY_WRITE, arg2,
12814                              maxevents * sizeof(struct target_epoll_event), 1);
12815        if (!target_ep) {
12816            return -TARGET_EFAULT;
12817        }
12818
12819        ep = g_try_new(struct epoll_event, maxevents);
12820        if (!ep) {
12821            unlock_user(target_ep, arg2, 0);
12822            return -TARGET_ENOMEM;
12823        }
12824
12825        switch (num) {
12826#if defined(TARGET_NR_epoll_pwait)
12827        case TARGET_NR_epoll_pwait:
12828        {
12829            sigset_t *set = NULL;
12830
12831            if (arg5) {
12832                ret = process_sigsuspend_mask(&set, arg5, arg6);
12833                if (ret != 0) {
12834                    break;
12835                }
12836            }
12837
12838            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12839                                             set, SIGSET_T_SIZE));
12840
12841            if (set) {
12842                finish_sigsuspend_mask(ret);
12843            }
12844            break;
12845        }
12846#endif
12847#if defined(TARGET_NR_epoll_wait)
12848        case TARGET_NR_epoll_wait:
12849            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12850                                             NULL, 0));
12851            break;
12852#endif
12853        default:
12854            ret = -TARGET_ENOSYS;
12855        }
12856        if (!is_error(ret)) {
12857            int i;
12858            for (i = 0; i < ret; i++) {
12859                target_ep[i].events = tswap32(ep[i].events);
12860                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12861            }
12862            unlock_user(target_ep, arg2,
12863                        ret * sizeof(struct target_epoll_event));
12864        } else {
12865            unlock_user(target_ep, arg2, 0);
12866        }
12867        g_free(ep);
12868        return ret;
12869    }
12870#endif
12871#endif
12872#ifdef TARGET_NR_prlimit64
12873    case TARGET_NR_prlimit64:
12874    {
12875        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12876        struct target_rlimit64 *target_rnew, *target_rold;
12877        struct host_rlimit64 rnew, rold, *rnewp = 0;
12878        int resource = target_to_host_resource(arg2);
12879
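             /*
              * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
              * forwarded to the host: applying them would constrain QEMU's
              * own memory usage rather than just the guest's.
              */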
12880        if (arg3 && (resource != RLIMIT_AS &&
12881                     resource != RLIMIT_DATA &&
12882                     resource != RLIMIT_STACK)) {
12883            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12884                return -TARGET_EFAULT;
12885            }
12886            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12887            rnew.rlim_max = tswap64(target_rnew->rlim_max);
12888            unlock_user_struct(target_rnew, arg3, 0);
12889            rnewp = &rnew;
12890        }
12891
12892        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12893        if (!is_error(ret) && arg4) {
12894            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12895                return -TARGET_EFAULT;
12896            }
12897            target_rold->rlim_cur = tswap64(rold.rlim_cur);
12898            target_rold->rlim_max = tswap64(rold.rlim_max);
12899            unlock_user_struct(target_rold, arg4, 1);
12900        }
12901        return ret;
12902    }
12903#endif
12904#ifdef TARGET_NR_gethostname
12905    case TARGET_NR_gethostname:
12906    {
12907        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12908        if (name) {
12909            ret = get_errno(gethostname(name, arg2));
12910            unlock_user(name, arg1, arg2);
12911        } else {
12912            ret = -TARGET_EFAULT;
12913        }
12914        return ret;
12915    }
12916#endif
12917#ifdef TARGET_NR_atomic_cmpxchg_32
12918    case TARGET_NR_atomic_cmpxchg_32:
12919    {
12920        /* should use start_exclusive from main.c */
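             /*
              * Compare the 32-bit word at guest address arg6 with arg2 and,
              * if they match, store arg1 there; the previous memory value is
              * returned to the guest. Note this is not done atomically here.
              */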
12921        abi_ulong mem_value;
12922        if (get_user_u32(mem_value, arg6)) {
12923            target_siginfo_t info;
12924            info.si_signo = SIGSEGV;
12925            info.si_errno = 0;
12926            info.si_code = TARGET_SEGV_MAPERR;
12927            info._sifields._sigfault._addr = arg6;
12928            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12929            return 0xdeadbeef;
12931        }
12932        if (mem_value == arg2) {
12933            put_user_u32(arg1, arg6);
             }
12934        return mem_value;
12935    }
12936#endif
12937#ifdef TARGET_NR_atomic_barrier
12938    case TARGET_NR_atomic_barrier:
12939        /* Like the kernel implementation and the QEMU Arm barrier,
12940           treat this as a no-op. */
12941        return 0;
12942#endif
12943
12944#ifdef TARGET_NR_timer_create
12945    case TARGET_NR_timer_create:
12946    {
12947        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12948
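             /*
              * Host timers live in the g_posix_timers slot array; the guest
              * timer id written back is the slot index tagged with
              * TIMER_MAGIC, which get_timer_id() decodes again for the
              * other timer_* syscalls.
              */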
12949        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12950
12951        int clkid = arg1;
12952        int timer_index = next_free_host_timer();
12953
12954        if (timer_index < 0) {
12955            ret = -TARGET_EAGAIN;
12956        } else {
12957            timer_t *phtimer = g_posix_timers + timer_index;
12958
12959            if (arg2) {
12960                phost_sevp = &host_sevp;
12961                ret = target_to_host_sigevent(phost_sevp, arg2);
12962                if (ret != 0) {
12963                    free_host_timer_slot(timer_index);
12964                    return ret;
12965                }
12966            }
12967
12968            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12969            if (ret) {
12970                free_host_timer_slot(timer_index);
12971            } else {
12972                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12973                    timer_delete(*phtimer);
12974                    free_host_timer_slot(timer_index);
12975                    return -TARGET_EFAULT;
12976                }
12977            }
12978        }
12979        return ret;
12980    }
12981#endif
12982
12983#ifdef TARGET_NR_timer_settime
12984    case TARGET_NR_timer_settime:
12985    {
12986        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12987         * struct itimerspec * old_value */
12988        target_timer_t timerid = get_timer_id(arg1);
12989
12990        if (timerid < 0) {
12991            ret = timerid;
12992        } else if (arg3 == 0) {
12993            ret = -TARGET_EINVAL;
12994        } else {
12995            timer_t htimer = g_posix_timers[timerid];
12996            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12997
12998            if (target_to_host_itimerspec(&hspec_new, arg3)) {
12999                return -TARGET_EFAULT;
13000            }
13001            ret = get_errno(
13002                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13003            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13004                return -TARGET_EFAULT;
13005            }
13006        }
13007        return ret;
13008    }
13009#endif
13010
13011#ifdef TARGET_NR_timer_settime64
13012    case TARGET_NR_timer_settime64:
13013    {
13014        target_timer_t timerid = get_timer_id(arg1);
13015
13016        if (timerid < 0) {
13017            ret = timerid;
13018        } else if (arg3 == 0) {
13019            ret = -TARGET_EINVAL;
13020        } else {
13021            timer_t htimer = g_posix_timers[timerid];
13022            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13023
13024            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13025                return -TARGET_EFAULT;
13026            }
13027            ret = get_errno(
13028                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13029            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13030                return -TARGET_EFAULT;
13031            }
13032        }
13033        return ret;
13034    }
13035#endif
13036
13037#ifdef TARGET_NR_timer_gettime
13038    case TARGET_NR_timer_gettime:
13039    {
13040        /* args: timer_t timerid, struct itimerspec *curr_value */
13041        target_timer_t timerid = get_timer_id(arg1);
13042
13043        if (timerid < 0) {
13044            ret = timerid;
13045        } else if (!arg2) {
13046            ret = -TARGET_EFAULT;
13047        } else {
13048            timer_t htimer = g_posix_timers[timerid];
13049            struct itimerspec hspec;
13050            ret = get_errno(timer_gettime(htimer, &hspec));
13051
13052            if (host_to_target_itimerspec(arg2, &hspec)) {
13053                ret = -TARGET_EFAULT;
13054            }
13055        }
13056        return ret;
13057    }
13058#endif
13059
13060#ifdef TARGET_NR_timer_gettime64
13061    case TARGET_NR_timer_gettime64:
13062    {
13063        /* args: timer_t timerid, struct itimerspec64 *curr_value */
13064        target_timer_t timerid = get_timer_id(arg1);
13065
13066        if (timerid < 0) {
13067            ret = timerid;
13068        } else if (!arg2) {
13069            ret = -TARGET_EFAULT;
13070        } else {
13071            timer_t htimer = g_posix_timers[timerid];
13072            struct itimerspec hspec;
13073            ret = get_errno(timer_gettime(htimer, &hspec));
13074
13075            if (host_to_target_itimerspec64(arg2, &hspec)) {
13076                ret = -TARGET_EFAULT;
13077            }
13078        }
13079        return ret;
13080    }
13081#endif
13082
13083#ifdef TARGET_NR_timer_getoverrun
13084    case TARGET_NR_timer_getoverrun:
13085    {
13086        /* args: timer_t timerid */
13087        target_timer_t timerid = get_timer_id(arg1);
13088
13089        if (timerid < 0) {
13090            ret = timerid;
13091        } else {
13092            timer_t htimer = g_posix_timers[timerid];
13093            ret = get_errno(timer_getoverrun(htimer));
13094        }
13095        return ret;
13096    }
13097#endif
13098
13099#ifdef TARGET_NR_timer_delete
13100    case TARGET_NR_timer_delete:
13101    {
13102        /* args: timer_t timerid */
13103        target_timer_t timerid = get_timer_id(arg1);
13104
13105        if (timerid < 0) {
13106            ret = timerid;
13107        } else {
13108            timer_t htimer = g_posix_timers[timerid];
13109            ret = get_errno(timer_delete(htimer));
13110            free_host_timer_slot(timerid);
13111        }
13112        return ret;
13113    }
13114#endif
13115
13116#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13117    case TARGET_NR_timerfd_create:
13118        return get_errno(timerfd_create(arg1,
13119                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13120#endif
13121
13122#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13123    case TARGET_NR_timerfd_gettime:
13124        {
13125            struct itimerspec its_curr;
13126
13127            ret = get_errno(timerfd_gettime(arg1, &its_curr));
13128
13129            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13130                return -TARGET_EFAULT;
13131            }
13132        }
13133        return ret;
13134#endif
13135
13136#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13137    case TARGET_NR_timerfd_gettime64:
13138        {
13139            struct itimerspec its_curr;
13140
13141            ret = get_errno(timerfd_gettime(arg1, &its_curr));
13142
13143            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13144                return -TARGET_EFAULT;
13145            }
13146        }
13147        return ret;
13148#endif
13149
13150#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13151    case TARGET_NR_timerfd_settime:
13152        {
13153            struct itimerspec its_new, its_old, *p_new;
13154
13155            if (arg3) {
13156                if (target_to_host_itimerspec(&its_new, arg3)) {
13157                    return -TARGET_EFAULT;
13158                }
13159                p_new = &its_new;
13160            } else {
13161                p_new = NULL;
13162            }
13163
13164            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13165
13166            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13167                return -TARGET_EFAULT;
13168            }
13169        }
13170        return ret;
13171#endif
13172
13173#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13174    case TARGET_NR_timerfd_settime64:
13175        {
13176            struct itimerspec its_new, its_old, *p_new;
13177
13178            if (arg3) {
13179                if (target_to_host_itimerspec64(&its_new, arg3)) {
13180                    return -TARGET_EFAULT;
13181                }
13182                p_new = &its_new;
13183            } else {
13184                p_new = NULL;
13185            }
13186
13187            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13188
13189            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13190                return -TARGET_EFAULT;
13191            }
13192        }
13193        return ret;
13194#endif
13195
13196#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13197    case TARGET_NR_ioprio_get:
13198        return get_errno(ioprio_get(arg1, arg2));
13199#endif
13200
13201#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13202    case TARGET_NR_ioprio_set:
13203        return get_errno(ioprio_set(arg1, arg2, arg3));
13204#endif
13205
13206#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13207    case TARGET_NR_setns:
13208        return get_errno(setns(arg1, arg2));
13209#endif
13210#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13211    case TARGET_NR_unshare:
13212        return get_errno(unshare(arg1));
13213#endif
13214#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13215    case TARGET_NR_kcmp:
13216        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13217#endif
13218#ifdef TARGET_NR_swapcontext
13219    case TARGET_NR_swapcontext:
13220        /* PowerPC specific.  */
13221        return do_swapcontext(cpu_env, arg1, arg2, arg3);
13222#endif
13223#ifdef TARGET_NR_memfd_create
13224    case TARGET_NR_memfd_create:
13225        p = lock_user_string(arg1);
13226        if (!p) {
13227            return -TARGET_EFAULT;
13228        }
13229        ret = get_errno(memfd_create(p, arg2));
13230        fd_trans_unregister(ret);
13231        unlock_user(p, arg1, 0);
13232        return ret;
13233#endif
13234#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13235    case TARGET_NR_membarrier:
13236        return get_errno(membarrier(arg1, arg2));
13237#endif
13238
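         /*
          * copy_file_range: the optional 64-bit in/out offsets are read from
          * guest memory before the call and, on success, written back with
          * their updated values.
          */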
13239#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13240    case TARGET_NR_copy_file_range:
13241        {
13242            loff_t inoff, outoff;
13243            loff_t *pinoff = NULL, *poutoff = NULL;
13244
13245            if (arg2) {
13246                if (get_user_u64(inoff, arg2)) {
13247                    return -TARGET_EFAULT;
13248                }
13249                pinoff = &inoff;
13250            }
13251            if (arg4) {
13252                if (get_user_u64(outoff, arg4)) {
13253                    return -TARGET_EFAULT;
13254                }
13255                poutoff = &outoff;
13256            }
13257            /* Do not sign-extend the count parameter. */
13258            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13259                                                 (abi_ulong)arg5, arg6));
13260            if (!is_error(ret) && ret > 0) {
13261                if (arg2) {
13262                    if (put_user_u64(inoff, arg2)) {
13263                        return -TARGET_EFAULT;
13264                    }
13265                }
13266                if (arg4) {
13267                    if (put_user_u64(outoff, arg4)) {
13268                        return -TARGET_EFAULT;
13269                    }
13270                }
13271            }
13272        }
13273        return ret;
13274#endif
13275
13276#if defined(TARGET_NR_pivot_root)
13277    case TARGET_NR_pivot_root:
13278        {
13279            void *p2;
13280            p = lock_user_string(arg1); /* new_root */
13281            p2 = lock_user_string(arg2); /* put_old */
13282            if (!p || !p2) {
13283                ret = -TARGET_EFAULT;
13284            } else {
13285                ret = get_errno(pivot_root(p, p2));
13286            }
13287            unlock_user(p2, arg2, 0);
13288            unlock_user(p, arg1, 0);
13289        }
13290        return ret;
13291#endif
13292
13293    default:
13294        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13295        return -TARGET_ENOSYS;
13296    }
13297    return ret;
13298}
13299
13300abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13301                    abi_long arg2, abi_long arg3, abi_long arg4,
13302                    abi_long arg5, abi_long arg6, abi_long arg7,
13303                    abi_long arg8)
13304{
13305    CPUState *cpu = env_cpu(cpu_env);
13306    abi_long ret;
13307
13308#ifdef DEBUG_ERESTARTSYS
13309    /* Debug-only code for exercising the syscall-restart code paths
13310     * in the per-architecture cpu main loops: restart every syscall
13311     * the guest makes once before letting it through.
13312     */
13313    {
13314        static bool flag;
13315        flag = !flag;
13316        if (flag) {
13317            return -QEMU_ERESTARTSYS;
13318        }
13319    }
13320#endif
13321
13322    record_syscall_start(cpu, num, arg1,
13323                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13324
13325    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13326        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13327    }
13328
13329    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13330                      arg5, arg6, arg7, arg8);
13331
13332    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13333        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13334                          arg3, arg4, arg5, arg6);
13335    }
13336
13337    record_syscall_return(cpu, num, ret);
13338    return ret;
13339}
13340