/* linux/arch/x86/um/shared/sysdep/stub_64.h */
/*
 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */
   5
#ifndef __SYSDEP_STUB_H
#define __SYSDEP_STUB_H

#include <sysdep/ptrace_user.h>

/* Register slot (from sysdep/ptrace_user.h) where a syscall's return
 * value lands on x86-64: %rax. */
#define STUB_SYSCALL_RET PT_INDEX(RAX)
/* 64-bit has plain mmap taking a byte offset, so no mmap2-style page
 * shifting is needed - MMAP_OFFSET is the identity. */
#define STUB_MMAP_NR __NR_mmap
#define MMAP_OFFSET(o) (o)

/* The syscall instruction itself destroys %rcx (return RIP) and
 * %r11 (saved RFLAGS); "memory" forces pending stores to be visible
 * to the kernel. */
#define __syscall_clobber "r11","rcx","memory"
#define __syscall "syscall"
/*
 * stub_syscall0 - invoke a zero-argument system call from stub context.
 *
 * The syscall number is loaded into %rax ("0" ties the input to the
 * "=a" output register); the kernel's return value comes back in %rax.
 */
static inline long stub_syscall0(long syscall)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall) : __syscall_clobber );

	return ret;
}
  28
/*
 * stub_syscall2 - two-argument system call from stub context.
 *
 * Per the x86-64 syscall convention: number in %rax, arg1 in
 * %rdi ("D"), arg2 in %rsi ("S"); return value in %rax.
 */
static inline long stub_syscall2(long syscall, long arg1, long arg2)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2) : __syscall_clobber );

	return ret;
}
  39
/*
 * stub_syscall3 - three-argument system call from stub context.
 *
 * Like stub_syscall2, with arg3 placed in %rdx ("d").
 */
static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3)
		: __syscall_clobber );

	return ret;
}
  51
/*
 * stub_syscall4 - four-argument system call from stub context.
 *
 * The fourth syscall argument belongs in %r10 (not %rcx, which the
 * syscall instruction overwrites), so it is moved there by hand and
 * %r10 is added to the clobber list.
 */
static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
				 long arg4)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; " __syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4)
		: __syscall_clobber, "r10" );

	return ret;
}
  65
/*
 * stub_syscall5 - five-argument system call from stub context.
 *
 * arg4 and arg5 are moved into %r10 and %r8 manually (the syscall
 * convention's 4th and 5th argument registers), and both registers are
 * declared clobbered.
 */
static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
				 long arg4, long arg5)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4), "g" (arg5)
		: __syscall_clobber, "r10", "r8" );

	return ret;
}
  79
  80static inline void trap_myself(void)
  81{
  82        __asm("int3");
  83}
  84
  85static inline void remap_stack(long fd, unsigned long offset)
  86{
  87        __asm__ volatile ("movq %4,%%r10 ; movq %5,%%r8 ; "
  88                          "movq %6, %%r9; " __syscall "; movq %7, %%rbx ; "
  89                          "movq %%rax, (%%rbx)":
  90                          : "a" (STUB_MMAP_NR), "D" (STUB_DATA),
  91                            "S" (UM_KERN_PAGE_SIZE),
  92                            "d" (PROT_READ | PROT_WRITE),
  93                            "g" (MAP_FIXED | MAP_SHARED), "g" (fd),
  94                            "g" (offset),
  95                            "i" (&((struct stub_data *) STUB_DATA)->err)
  96                          : __syscall_clobber, "r10", "r8", "r9" );
  97}

#endif