/* linux/include/asm-x86/uaccess_64.h */
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))
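
/*
 * Illustrative only (not part of this header): code that must push a
 * kernel buffer through an interface that checks against addr_limit
 * temporarily widens the limit with set_fs(), and must always restore
 * the old value.  The helper name below is hypothetical; only
 * get_fs()/set_fs()/KERNEL_DS come from the definitions above.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);			// bypass range checking
 *	err = do_uaccess_path(kbuf, len);	// may use copy_*_user()
 *	set_fs(old_fs);				// always restore
 */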

#define segment_eq(a,b) ((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))

/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr,size) ({ \
        unsigned long flag,roksum; \
        __chk_user_ptr(addr); \
        asm("# range_ok\n\t" \
                "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"  \
                :"=&r" (flag), "=r" (roksum) \
                :"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
        flag; })

#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
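
/*
 * Sketch of the intended calling convention (buffer and length names are
 * hypothetical): access_ok() is nonzero when the whole range lies below
 * addr_limit, after which the unchecked __xxx access variants below may
 * be used on that range.  Note the type argument is ignored here.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	// ubuf .. ubuf+len-1 is now known to be a legal user range
 */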

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

#define ARCH_HAS_SEARCH_EXTABLE
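
/*
 * A minimal sketch (assumed shape, not the verbatim fault-handler code)
 * of how the table is consumed: given the faulting instruction pointer,
 * find the matching entry and resume execution at its fixup address.
 * search_exception_tables() lives in the generic kernel; the pt_regs
 * field name is taken as ->ip here.
 *
 *	const struct exception_table_entry *e;
 *
 *	e = search_exception_tables(regs->ip);	// lookup by insn address
 *	if (e) {
 *		regs->ip = e->fixup;		// continue in fixup code
 *		return 1;			// fault was handled
 *	}
 */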

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size,ret,x,ptr) \
        asm volatile("call __get_user_" #size \
                :"=a" (ret),"=d" (x) \
                :"c" (ptr) \
                :"r8")

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr)                                                 \
({      unsigned long __val_gu;                                         \
        int __ret_gu;                                                   \
        __chk_user_ptr(ptr);                                            \
        switch(sizeof (*(ptr))) {                                       \
        case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;          \
        case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;          \
        case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;          \
        case 8:  __get_user_x(8,__ret_gu,__val_gu,ptr); break;          \
        default: __get_user_bad(); break;                               \
        }                                                               \
        (x) = (__force typeof(*(ptr)))__val_gu;                         \
        __ret_gu;                                                       \
})
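
/*
 * Typical use (illustrative; uptr and the surrounding context are
 * hypothetical): get_user() both fetches the value and reports a fault,
 * so no separate access_ok() call is needed here.
 *
 *	int __user *uptr;	// e.g. a syscall argument
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;	// nonzero means the read faulted
 */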

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr)                                    \
        asm volatile("call __put_user_" #size                   \
                :"=a" (ret)                                             \
                :"c" (ptr),"d" (x)                                      \
                :"r8")

#define put_user(x,ptr)                                                 \
  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user(x,ptr) \
  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
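
/*
 * The unchecked __get_user()/__put_user() exist for the check-once,
 * access-many pattern described above.  A sketch, with a hypothetical
 * structure and function:
 *
 *	struct pair { long a, b; };
 *
 *	int fill_pair(struct pair __user *p)
 *	{
 *		if (!access_ok(VERIFY_WRITE, p, sizeof(*p)))
 *			return -EFAULT;
 *		if (__put_user(0, &p->a) || __put_user(1, &p->b))
 *			return -EFAULT;	// range already validated above
 *		return 0;
 *	}
 */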

#define __put_user_nocheck(x,ptr,size)                  \
({                                                      \
        int __pu_err;                                   \
        __put_user_size((x),(ptr),(size),__pu_err);     \
        __pu_err;                                       \
})

#define __put_user_check(x,ptr,size)                    \
({                                                      \
        int __pu_err;                                   \
        typeof(*(ptr)) __user *__pu_addr = (ptr);       \
        switch (size) {                                 \
        case 1: __put_user_x(1,__pu_err,x,__pu_addr); break;    \
        case 2: __put_user_x(2,__pu_err,x,__pu_addr); break;    \
        case 4: __put_user_x(4,__pu_err,x,__pu_addr); break;    \
        case 8: __put_user_x(8,__pu_err,x,__pu_addr); break;    \
        default: __put_user_bad();                      \
        }                                               \
        __pu_err;                                       \
})

#define __put_user_size(x,ptr,size,retval)                              \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
          case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
          case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
          case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
          case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\
          default: __put_user_bad();                                    \
        }                                                               \
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile(                                   \
                "1:     mov"itype" %"rtype"1,%2\n"              \
                "2:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "3:     mov %3,%0\n"                            \
                "       jmp 2b\n"                               \
                ".previous\n"                                   \
                ".section __ex_table,\"a\"\n"                   \
                "       .align 8\n"                             \
                "       .quad 1b,3b\n"                          \
                ".previous"                                     \
                : "=r"(err)                                     \
                : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))
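
/*
 * For reference, a rough sketch of what a 4-byte
 * __put_user_asm(x, ptr, err, "l", "k", "ir", -EFAULT) expands to
 * (register choices are up to gcc; -14 is -EFAULT):
 *
 *	1:	movl %eax,(%rcx)	# the store that may fault
 *	2:				# normal path continues here
 *	.section .fixup,"ax"
 *	3:	mov $-14,%edx		# err = -EFAULT
 *		jmp 2b
 *	.previous
 *	.section __ex_table,"a"
 *		.align 8
 *		.quad 1b,3b		# a fault at 1 resumes at 3
 *	.previous
 */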

#define __get_user_nocheck(x,ptr,size)                          \
({                                                              \
        int __gu_err;                                           \
        unsigned long __gu_val;                                 \
        __get_user_size(__gu_val,(ptr),(size),__gu_err);        \
        (x) = (__force typeof(*(ptr)))__gu_val;                 \
        __gu_err;                                               \
})

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)                              \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
          case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
          case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
          case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
          case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
          default: (x) = __get_user_bad();                              \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile(                                   \
                "1:     mov"itype" %2,%"rtype"1\n"              \
                "2:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "3:     mov %3,%0\n"                            \
                "       xor"itype" %"rtype"1,%"rtype"1\n"       \
                "       jmp 2b\n"                               \
                ".previous\n"                                   \
                ".section __ex_table,\"a\"\n"                   \
                "       .align 8\n"                             \
                "       .quad 1b,3b\n"                          \
                ".previous"                                     \
                : "=r"(err), ltype (x)                          \
                : "m"(__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
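
/*
 * Note these return the number of bytes that could NOT be copied
 * (0 on success), not an errno.  Callers normally convert; a minimal
 * sketch with hypothetical buffer names:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;	// some bytes were left uncopied
 */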

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst,(__force void *)src,size);
        switch (size) {
        case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
                return ret;
        case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
                return ret;
        case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
                return ret;
        case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
                return ret;
        case 10:
                /* 10 bytes still pending if the first 8-byte read faults */
                __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",10);
                if (unlikely(ret)) return ret;
                __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
                return ret;
        case 16:
                __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
                if (unlikely(ret)) return ret;
                __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
                return ret;
        default:
                return copy_user_generic(dst,(__force void *)src,size);
        }
}
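
/*
 * Because size is a compile-time constant in the common case, a fixed
 * size copy such as the one below compiles to the inline mov sequences
 * above instead of a call to copy_user_generic().  Sketch, assuming the
 * caller has already done access_ok() and uptr is hypothetical:
 *
 *	struct sample { u64 lo, hi; } val;	// 16 bytes: the case 16 path
 *
 *	if (__copy_from_user(&val, uptr, sizeof(val)))
 *		return -EFAULT;
 */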

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,src,size);
        switch (size) {
        case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
                return ret;
        case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
                return ret;
        case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
                return ret;
        case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
                return ret;
        case 10:
                __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
                if (unlikely(ret)) return ret;
                asm("":::"memory");
                __put_user_asm(((u16*)src)[4],(u16 __user *)dst+4,ret,"w","w","ir",2);
                return ret;
        case 16:
                __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
                if (unlikely(ret)) return ret;
                asm("":::"memory");
                __put_user_asm(((u64*)src)[1],(u64 __user *)dst+1,ret,"q","","ir",8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst,src,size);
        }
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,(__force void *)src,size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1);
                if (likely(!ret))
                        __put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2);
                if (likely(!ret))
                        __put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2);
                return ret;
        }
        case 4: {
                u32 tmp;
                __get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4);
                if (likely(!ret))
                        __put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8);
                if (likely(!ret))
                        __put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,(__force void *)src,size);
        }
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1
extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
        might_sleep();
        return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}
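
/*
 * The nocache variants use non-temporal stores so that a large, written
 * once destination does not displace useful cache lines.  Illustrative
 * use (dst_page and ubuf are hypothetical; the caller may sleep and has
 * validated the range):
 *
 *	if (__copy_from_user_nocache(dst_page, ubuf, PAGE_SIZE))
 *		return -EFAULT;
 */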

#endif /* __X86_64_UACCESS_H */