linux/arch/x86/include/asm/uaccess.h
   1#ifndef _ASM_X86_UACCESS_H
   2#define _ASM_X86_UACCESS_H
   3/*
   4 * User space memory access functions
   5 */
   6#include <linux/errno.h>
   7#include <linux/compiler.h>
   8#include <linux/thread_info.h>
   9#include <linux/string.h>
  10#include <asm/asm.h>
  11#include <asm/page.h>
  12#include <asm/smap.h>
  13
  14#define VERIFY_READ 0
  15#define VERIFY_WRITE 1
  16
  17/*
  18 * The fs value determines whether argument validity checking should be
   19 * performed or not.  If get_fs() == USER_DS, checking is performed; with
  20 * get_fs() == KERNEL_DS, checking is bypassed.
  21 *
  22 * For historical reasons, these macros are grossly misnamed.
  23 */
  24
  25#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })
  26
  27#define KERNEL_DS       MAKE_MM_SEG(-1UL)
  28#define USER_DS         MAKE_MM_SEG(TASK_SIZE_MAX)
  29
  30#define get_ds()        (KERNEL_DS)
  31#define get_fs()        (current_thread_info()->addr_limit)
  32#define set_fs(x)       (current_thread_info()->addr_limit = (x))
  33
  34#define segment_eq(a, b)        ((a).seg == (b).seg)
  35
  36#define user_addr_max() (current_thread_info()->addr_limit.seg)
  37#define __addr_ok(addr)         \
  38        ((unsigned long __force)(addr) < user_addr_max())
  39
  40/*
  41 * Test whether a block of memory is a valid user space address.
  42 * Returns 0 if the range is valid, nonzero otherwise.
  43 */
  44static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
  45{
  46        /*
  47         * If we have used "sizeof()" for the size,
  48         * we know it won't overflow the limit (but
  49         * it might overflow the 'addr', so it's
  50         * important to subtract the size from the
  51         * limit, not add it to the address).
  52         */
  53        if (__builtin_constant_p(size))
  54                return unlikely(addr > limit - size);
  55
  56        /* Arbitrary sizes? Be careful about overflow */
  57        addr += size;
  58        if (unlikely(addr < size))
  59                return true;
  60        return unlikely(addr > limit);
  61}
  62
  63#define __range_not_ok(addr, size, limit)                               \
  64({                                                                      \
  65        __chk_user_ptr(addr);                                           \
  66        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
  67})
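
/*
 * Worked example of the overflow handling above (illustrative numbers):
 * with addr = ULONG_MAX - 15 and size = 32, "addr + size" wraps around
 * to 16, so a naive "addr + size > limit" test would pass even though
 * the range runs off the top of the address space.  For constant sizes
 * the comparison "addr > limit - size" cannot wrap, because a sizeof()
 * value is always far smaller than the limit; for arbitrary sizes the
 * wrap is caught explicitly by the "addr < size" test after the add.
 */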
  68
  69/**
  70 * access_ok: - Checks if a user space pointer is valid
  71 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
  72 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
  73 *        to write to a block, it is always safe to read from it.
  74 * @addr: User space pointer to start of block to check
  75 * @size: Size of block to check
  76 *
  77 * Context: User context only. This function may sleep if pagefaults are
  78 *          enabled.
  79 *
  80 * Checks if a pointer to a block of memory in user space is valid.
  81 *
  82 * Returns true (nonzero) if the memory block may be valid, false (zero)
  83 * if it is definitely invalid.
  84 *
  85 * Note that, depending on architecture, this function probably just
  86 * checks that the pointer is in the user space range - after calling
  87 * this function, memory access functions may still return -EFAULT.
  88 */
  89#define access_ok(type, addr, size) \
  90        likely(!__range_not_ok(addr, size, user_addr_max()))
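
/*
 * Illustrative use of access_ok() (a sketch; "read_u32_pair" and "uarg"
 * are made-up names, not part of this header): validate the whole user
 * range once, then use the cheaper unchecked __get_user() accessors
 * declared further down.
 */
static inline int read_u32_pair(unsigned int __user *uarg,
                                unsigned int *lo, unsigned int *hi)
{
        if (!access_ok(VERIFY_READ, uarg, 2 * sizeof(unsigned int)))
                return -EFAULT;
        /* __get_user() skips the range check; access_ok() above covers it */
        if (__get_user(*lo, uarg) || __get_user(*hi, uarg + 1))
                return -EFAULT;
        return 0;
}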
  91
  92/*
  93 * The exception table consists of pairs of addresses relative to the
   94 * exception table entry itself: the first is the address of an
  95 * instruction that is allowed to fault, and the second is the address
  96 * at which the program should continue.  No registers are modified,
  97 * so it is entirely up to the continuation code to figure out what to
  98 * do.
  99 *
 100 * All the routines below use bits of fixup code that are out of line
 101 * with the main instruction path.  This means when everything is well,
 102 * we don't even have to jump over them.  Further, they do not intrude
 103 * on our cache or tlb entries.
 104 */
 105
 106struct exception_table_entry {
 107        int insn, fixup;
 108};
 109/* This is not the generic standard exception_table_entry format */
 110#define ARCH_HAS_SORT_EXTABLE
 111#define ARCH_HAS_SEARCH_EXTABLE
 112
 113extern int fixup_exception(struct pt_regs *regs);
 114extern int early_fixup_exception(unsigned long *ip);
 115
 116/*
 117 * These are the main single-value transfer routines.  They automatically
 118 * use the right size if we just have the right pointer type.
 119 *
 120 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 121 * and yet we don't want to do any pointers, because that is too much
 122 * of a performance impact. Thus we have a few rather ugly macros here,
 123 * and hide all the ugliness from the user.
 124 *
 125 * The "__xxx" versions of the user access functions are versions that
  126 * do not verify the address space; that check must have been done previously
 127 * with a separate "access_ok()" call (this is used when we do multiple
 128 * accesses to the same area of user memory).
 129 */
 130
 131extern int __get_user_1(void);
 132extern int __get_user_2(void);
 133extern int __get_user_4(void);
 134extern int __get_user_8(void);
 135extern int __get_user_bad(void);
 136
 137#define __uaccess_begin() stac()
 138#define __uaccess_end()   clac()
 139
 140/*
 141 * This is a type: either unsigned long, if the argument fits into
 142 * that type, or otherwise unsigned long long.
 143 */
 144#define __inttype(x) \
 145__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 146
 147/**
 148 * get_user: - Get a simple variable from user space.
 149 * @x:   Variable to store result.
 150 * @ptr: Source address, in user space.
 151 *
 152 * Context: User context only. This function may sleep if pagefaults are
 153 *          enabled.
 154 *
 155 * This macro copies a single simple variable from user space to kernel
 156 * space.  It supports simple types like char and int, but not larger
 157 * data types like structures or arrays.
 158 *
 159 * @ptr must have pointer-to-simple-variable type, and the result of
 160 * dereferencing @ptr must be assignable to @x without a cast.
 161 *
 162 * Returns zero on success, or -EFAULT on error.
 163 * On error, the variable @x is set to zero.
 164 */
 165/*
 166 * Careful: we have to cast the result to the type of the pointer
 167 * for sign reasons.
 168 *
 169 * The use of _ASM_DX as the register specifier is a bit of a
 170 * simplification, as gcc only cares about it as the starting point
 171 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 172 * (%ecx being the next register in gcc's x86 register sequence), and
 173 * %rdx on 64 bits.
 174 *
 175 * Clang/LLVM cares about the size of the register, but still wants
 176 * the base register for something that ends up being a pair.
 177 */
 178#define get_user(x, ptr)                                                \
 179({                                                                      \
 180        int __ret_gu;                                                   \
 181        register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);            \
 182        __chk_user_ptr(ptr);                                            \
 183        might_fault();                                                  \
 184        asm volatile("call __get_user_%P3"                              \
 185                     : "=a" (__ret_gu), "=r" (__val_gu)                 \
 186                     : "0" (ptr), "i" (sizeof(*(ptr))));                \
 187        (x) = (__force __typeof__(*(ptr))) __val_gu;                    \
 188        __builtin_expect(__ret_gu, 0);                                  \
 189})
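
/*
 * Illustrative get_user() call (a sketch; the function and parameter
 * names are made up): get_user() performs its own range check and
 * evaluates to 0 on success or -EFAULT on a faulting access.
 */
static inline long fetch_user_flags(unsigned int __user *uflags,
                                    unsigned int *flags)
{
        if (get_user(*flags, uflags))
                return -EFAULT;         /* bad pointer or unmapped page */
        return 0;
}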
 190
 191#define __put_user_x(size, x, ptr, __ret_pu)                    \
 192        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
 193                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 194
 195
 196
 197#ifdef CONFIG_X86_32
 198#define __put_user_asm_u64(x, addr, err, errret)                        \
 199        asm volatile("\n"                                               \
 200                     "1:        movl %%eax,0(%2)\n"                     \
 201                     "2:        movl %%edx,4(%2)\n"                     \
 202                     "3:"                                               \
 203                     ".section .fixup,\"ax\"\n"                         \
 204                     "4:        movl %3,%0\n"                           \
 205                     "  jmp 3b\n"                                       \
 206                     ".previous\n"                                      \
 207                     _ASM_EXTABLE(1b, 4b)                               \
 208                     _ASM_EXTABLE(2b, 4b)                               \
 209                     : "=r" (err)                                       \
 210                     : "A" (x), "r" (addr), "i" (errret), "0" (err))
 211
 212#define __put_user_asm_ex_u64(x, addr)                                  \
 213        asm volatile("\n"                                               \
 214                     "1:        movl %%eax,0(%1)\n"                     \
 215                     "2:        movl %%edx,4(%1)\n"                     \
 216                     "3:"                                               \
 217                     _ASM_EXTABLE_EX(1b, 2b)                            \
 218                     _ASM_EXTABLE_EX(2b, 3b)                            \
 219                     : : "A" (x), "r" (addr))
 220
 221#define __put_user_x8(x, ptr, __ret_pu)                         \
 222        asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
 223                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 224#else
 225#define __put_user_asm_u64(x, ptr, retval, errret) \
 226        __put_user_asm(x, ptr, retval, "q", "", "er", errret)
 227#define __put_user_asm_ex_u64(x, addr)  \
 228        __put_user_asm_ex(x, addr, "q", "", "er")
 229#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
 230#endif
 231
 232extern void __put_user_bad(void);
 233
 234/*
 235 * Strange magic calling convention: pointer in %ecx,
 236 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 237 */
 238extern void __put_user_1(void);
 239extern void __put_user_2(void);
 240extern void __put_user_4(void);
 241extern void __put_user_8(void);
 242
 243/**
 244 * put_user: - Write a simple value into user space.
 245 * @x:   Value to copy to user space.
 246 * @ptr: Destination address, in user space.
 247 *
 248 * Context: User context only. This function may sleep if pagefaults are
 249 *          enabled.
 250 *
 251 * This macro copies a single simple value from kernel space to user
 252 * space.  It supports simple types like char and int, but not larger
 253 * data types like structures or arrays.
 254 *
 255 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 256 * to the result of dereferencing @ptr.
 257 *
 258 * Returns zero on success, or -EFAULT on error.
 259 */
 260#define put_user(x, ptr)                                        \
 261({                                                              \
 262        int __ret_pu;                                           \
 263        __typeof__(*(ptr)) __pu_val;                            \
 264        __chk_user_ptr(ptr);                                    \
 265        might_fault();                                          \
 266        __pu_val = x;                                           \
 267        switch (sizeof(*(ptr))) {                               \
 268        case 1:                                                 \
 269                __put_user_x(1, __pu_val, ptr, __ret_pu);       \
 270                break;                                          \
 271        case 2:                                                 \
 272                __put_user_x(2, __pu_val, ptr, __ret_pu);       \
 273                break;                                          \
 274        case 4:                                                 \
 275                __put_user_x(4, __pu_val, ptr, __ret_pu);       \
 276                break;                                          \
 277        case 8:                                                 \
 278                __put_user_x8(__pu_val, ptr, __ret_pu);         \
 279                break;                                          \
 280        default:                                                \
 281                __put_user_x(X, __pu_val, ptr, __ret_pu);       \
 282                break;                                          \
 283        }                                                       \
 284        __builtin_expect(__ret_pu, 0);                          \
 285})
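
/*
 * Illustrative put_user() call (a sketch; the names are made up): the
 * size of the store is picked from the pointer type at compile time.
 */
static inline long report_result(int __user *uresult, int value)
{
        /* 0 on success, -EFAULT if the user address cannot be written */
        return put_user(value, uresult);
}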
 286
 287#define __put_user_size(x, ptr, size, retval, errret)                   \
 288do {                                                                    \
 289        retval = 0;                                                     \
 290        __chk_user_ptr(ptr);                                            \
 291        switch (size) {                                                 \
 292        case 1:                                                         \
 293                __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
 294                break;                                                  \
 295        case 2:                                                         \
 296                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
 297                break;                                                  \
 298        case 4:                                                         \
 299                __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
 300                break;                                                  \
 301        case 8:                                                         \
 302                __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,  \
 303                                   errret);                             \
 304                break;                                                  \
 305        default:                                                        \
 306                __put_user_bad();                                       \
 307        }                                                               \
 308} while (0)
 309
 310/*
 311 * This doesn't do __uaccess_begin/end - the exception handling
 312 * around it must do that.
 313 */
 314#define __put_user_size_ex(x, ptr, size)                                \
 315do {                                                                    \
 316        __chk_user_ptr(ptr);                                            \
 317        switch (size) {                                                 \
 318        case 1:                                                         \
 319                __put_user_asm_ex(x, ptr, "b", "b", "iq");              \
 320                break;                                                  \
 321        case 2:                                                         \
 322                __put_user_asm_ex(x, ptr, "w", "w", "ir");              \
 323                break;                                                  \
 324        case 4:                                                         \
 325                __put_user_asm_ex(x, ptr, "l", "k", "ir");              \
 326                break;                                                  \
 327        case 8:                                                         \
 328                __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);      \
 329                break;                                                  \
 330        default:                                                        \
 331                __put_user_bad();                                       \
 332        }                                                               \
 333} while (0)
 334
 335#ifdef CONFIG_X86_32
 336#define __get_user_asm_u64(x, ptr, retval, errret)      (x) = __get_user_bad()
 337#define __get_user_asm_ex_u64(x, ptr)                   (x) = __get_user_bad()
 338#else
 339#define __get_user_asm_u64(x, ptr, retval, errret) \
 340         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
 341#define __get_user_asm_ex_u64(x, ptr) \
 342         __get_user_asm_ex(x, ptr, "q", "", "=r")
 343#endif
 344
 345#define __get_user_size(x, ptr, size, retval, errret)                   \
 346do {                                                                    \
 347        retval = 0;                                                     \
 348        __chk_user_ptr(ptr);                                            \
 349        switch (size) {                                                 \
 350        case 1:                                                         \
 351                __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
 352                break;                                                  \
 353        case 2:                                                         \
 354                __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
 355                break;                                                  \
 356        case 4:                                                         \
 357                __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
 358                break;                                                  \
 359        case 8:                                                         \
 360                __get_user_asm_u64(x, ptr, retval, errret);             \
 361                break;                                                  \
 362        default:                                                        \
 363                (x) = __get_user_bad();                                 \
 364        }                                                               \
 365} while (0)
 366
 367#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
 368        asm volatile("\n"                                               \
 369                     "1:        mov"itype" %2,%"rtype"1\n"              \
 370                     "2:\n"                                             \
 371                     ".section .fixup,\"ax\"\n"                         \
 372                     "3:        mov %3,%0\n"                            \
 373                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
 374                     "  jmp 2b\n"                                       \
 375                     ".previous\n"                                      \
 376                     _ASM_EXTABLE(1b, 3b)                               \
 377                     : "=r" (err), ltype(x)                             \
 378                     : "m" (__m(addr)), "i" (errret), "0" (err))
 379
 380/*
 381 * This doesn't do __uaccess_begin/end - the exception handling
 382 * around it must do that.
 383 */
 384#define __get_user_size_ex(x, ptr, size)                                \
 385do {                                                                    \
 386        __chk_user_ptr(ptr);                                            \
 387        switch (size) {                                                 \
 388        case 1:                                                         \
 389                __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
 390                break;                                                  \
 391        case 2:                                                         \
 392                __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
 393                break;                                                  \
 394        case 4:                                                         \
 395                __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
 396                break;                                                  \
 397        case 8:                                                         \
 398                __get_user_asm_ex_u64(x, ptr);                          \
 399                break;                                                  \
 400        default:                                                        \
 401                (x) = __get_user_bad();                                 \
 402        }                                                               \
 403} while (0)
 404
 405#define __get_user_asm_ex(x, addr, itype, rtype, ltype)                 \
 406        asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
 407                     "2:\n"                                             \
 408                     _ASM_EXTABLE_EX(1b, 2b)                            \
 409                     : ltype(x) : "m" (__m(addr)))
 410
 411#define __put_user_nocheck(x, ptr, size)                        \
 412({                                                              \
 413        int __pu_err;                                           \
 414        __uaccess_begin();                                      \
 415        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
 416        __uaccess_end();                                        \
 417        __builtin_expect(__pu_err, 0);                          \
 418})
 419
 420#define __get_user_nocheck(x, ptr, size)                                \
 421({                                                                      \
 422        int __gu_err;                                                   \
 423        unsigned long __gu_val;                                         \
 424        __uaccess_begin();                                              \
 425        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
 426        __uaccess_end();                                                \
 427        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
 428        __builtin_expect(__gu_err, 0);                                  \
 429})
 430
 431/* FIXME: this hack is definitely wrong -AK */
 432struct __large_struct { unsigned long buf[100]; };
 433#define __m(x) (*(struct __large_struct __user *)(x))
 434
 435/*
 436 * Tell gcc we read from memory instead of writing: this is because
 437 * we do not write to any memory gcc knows about, so there are no
 438 * aliasing issues.
 439 */
 440#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
 441        asm volatile("\n"                                               \
 442                     "1:        mov"itype" %"rtype"1,%2\n"              \
 443                     "2:\n"                                             \
 444                     ".section .fixup,\"ax\"\n"                         \
 445                     "3:        mov %3,%0\n"                            \
 446                     "  jmp 2b\n"                                       \
 447                     ".previous\n"                                      \
 448                     _ASM_EXTABLE(1b, 3b)                               \
 449                     : "=r"(err)                                        \
 450                     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
 451
 452#define __put_user_asm_ex(x, addr, itype, rtype, ltype)                 \
 453        asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
 454                     "2:\n"                                             \
 455                     _ASM_EXTABLE_EX(1b, 2b)                            \
 456                     : : ltype(x), "m" (__m(addr)))
 457
 458/*
 459 * uaccess_try and catch
 460 */
 461#define uaccess_try     do {                                            \
 462        current_thread_info()->uaccess_err = 0;                         \
 463        __uaccess_begin();                                              \
 464        barrier();
 465
 466#define uaccess_catch(err)                                              \
 467        __uaccess_end();                                                \
 468        (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);    \
 469} while (0)
 470
 471/**
 472 * __get_user: - Get a simple variable from user space, with less checking.
 473 * @x:   Variable to store result.
 474 * @ptr: Source address, in user space.
 475 *
 476 * Context: User context only. This function may sleep if pagefaults are
 477 *          enabled.
 478 *
 479 * This macro copies a single simple variable from user space to kernel
 480 * space.  It supports simple types like char and int, but not larger
 481 * data types like structures or arrays.
 482 *
 483 * @ptr must have pointer-to-simple-variable type, and the result of
 484 * dereferencing @ptr must be assignable to @x without a cast.
 485 *
 486 * Caller must check the pointer with access_ok() before calling this
 487 * function.
 488 *
 489 * Returns zero on success, or -EFAULT on error.
 490 * On error, the variable @x is set to zero.
 491 */
 492
 493#define __get_user(x, ptr)                                              \
 494        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
 495
 496/**
 497 * __put_user: - Write a simple value into user space, with less checking.
 498 * @x:   Value to copy to user space.
 499 * @ptr: Destination address, in user space.
 500 *
 501 * Context: User context only. This function may sleep if pagefaults are
 502 *          enabled.
 503 *
 504 * This macro copies a single simple value from kernel space to user
 505 * space.  It supports simple types like char and int, but not larger
 506 * data types like structures or arrays.
 507 *
 508 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 509 * to the result of dereferencing @ptr.
 510 *
 511 * Caller must check the pointer with access_ok() before calling this
 512 * function.
 513 *
 514 * Returns zero on success, or -EFAULT on error.
 515 */
 516
 517#define __put_user(x, ptr)                                              \
 518        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 519
 520#define __get_user_unaligned __get_user
 521#define __put_user_unaligned __put_user
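
/*
 * Illustrative combination of access_ok() and __get_user() (a sketch;
 * "sum_user_array" and its parameters are made-up names): one up-front
 * range check lets the loop use the unchecked accessor per element.
 */
static inline int sum_user_array(const int __user *uarr, unsigned int n,
                                 long *sum)
{
        unsigned int i;
        int v;

        if (!access_ok(VERIFY_READ, uarr, n * sizeof(*uarr)))
                return -EFAULT;
        *sum = 0;
        for (i = 0; i < n; i++) {
                if (__get_user(v, uarr + i))
                        return -EFAULT;
                *sum += v;
        }
        return 0;
}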
 522
 523/*
 524 * {get|put}_user_try and catch
 525 *
 526 * get_user_try {
 527 *      get_user_ex(...);
 528 * } get_user_catch(err)
 529 */
 530#define get_user_try            uaccess_try
 531#define get_user_catch(err)     uaccess_catch(err)
 532
 533#define get_user_ex(x, ptr)     do {                                    \
 534        unsigned long __gue_val;                                        \
 535        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
 536        (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
 537} while (0)
 538
 539#define put_user_try            uaccess_try
 540#define put_user_catch(err)     uaccess_catch(err)
 541
 542#define put_user_ex(x, ptr)                                             \
 543        __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 544
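/*
 * Illustrative use of the try/catch style (a sketch; the structure and
 * function names are made up): every put_user_ex() in the block shares
 * one fixup path and a single STAC/CLAC pair, and any fault is reported
 * once through the catch clause.  get_user_ex() is used the same way
 * inside get_user_try/get_user_catch.
 */
struct demo_frame {
        unsigned long ip;
        unsigned long flags;
};

static inline int save_demo_frame(struct demo_frame __user *uframe,
                                  unsigned long ip, unsigned long flags)
{
        int err = 0;

        if (!access_ok(VERIFY_WRITE, uframe, sizeof(*uframe)))
                return -EFAULT;

        put_user_try {
                put_user_ex(ip, &uframe->ip);
                put_user_ex(flags, &uframe->flags);
        } put_user_catch(err);

        return err;
}
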
 545extern unsigned long
 546copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
 547extern __must_check long
 548strncpy_from_user(char *dst, const char __user *src, long count);
 549
 550extern __must_check long strlen_user(const char __user *str);
 551extern __must_check long strnlen_user(const char __user *str, long n);
 552
 553unsigned long __must_check clear_user(void __user *mem, unsigned long len);
 554unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
 555
 556extern void __cmpxchg_wrong_size(void)
 557        __compiletime_error("Bad argument size for cmpxchg");
 558
 559#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)       \
 560({                                                                      \
 561        int __ret = 0;                                                  \
 562        __typeof__(ptr) __uval = (uval);                                \
 563        __typeof__(*(ptr)) __old = (old);                               \
 564        __typeof__(*(ptr)) __new = (new);                               \
 565        __uaccess_begin();                                              \
 566        switch (size) {                                                 \
 567        case 1:                                                         \
 568        {                                                               \
 569                asm volatile("\n"                                       \
 570                        "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"          \
 571                        "2:\n"                                          \
 572                        "\t.section .fixup, \"ax\"\n"                   \
 573                        "3:\tmov     %3, %0\n"                          \
 574                        "\tjmp     2b\n"                                \
 575                        "\t.previous\n"                                 \
 576                        _ASM_EXTABLE(1b, 3b)                            \
 577                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
 578                        : "i" (-EFAULT), "q" (__new), "1" (__old)       \
 579                        : "memory"                                      \
 580                );                                                      \
 581                break;                                                  \
 582        }                                                               \
 583        case 2:                                                         \
 584        {                                                               \
 585                asm volatile("\n"                                       \
 586                        "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"          \
 587                        "2:\n"                                          \
 588                        "\t.section .fixup, \"ax\"\n"                   \
 589                        "3:\tmov     %3, %0\n"                          \
 590                        "\tjmp     2b\n"                                \
 591                        "\t.previous\n"                                 \
 592                        _ASM_EXTABLE(1b, 3b)                            \
 593                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
 594                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
 595                        : "memory"                                      \
 596                );                                                      \
 597                break;                                                  \
 598        }                                                               \
 599        case 4:                                                         \
 600        {                                                               \
 601                asm volatile("\n"                                       \
 602                        "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"          \
 603                        "2:\n"                                          \
 604                        "\t.section .fixup, \"ax\"\n"                   \
 605                        "3:\tmov     %3, %0\n"                          \
 606                        "\tjmp     2b\n"                                \
 607                        "\t.previous\n"                                 \
 608                        _ASM_EXTABLE(1b, 3b)                            \
 609                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
 610                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
 611                        : "memory"                                      \
 612                );                                                      \
 613                break;                                                  \
 614        }                                                               \
 615        case 8:                                                         \
 616        {                                                               \
 617                if (!IS_ENABLED(CONFIG_X86_64))                         \
 618                        __cmpxchg_wrong_size();                         \
 619                                                                        \
 620                asm volatile("\n"                                       \
 621                        "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"          \
 622                        "2:\n"                                          \
 623                        "\t.section .fixup, \"ax\"\n"                   \
 624                        "3:\tmov     %3, %0\n"                          \
 625                        "\tjmp     2b\n"                                \
 626                        "\t.previous\n"                                 \
 627                        _ASM_EXTABLE(1b, 3b)                            \
 628                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
 629                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
 630                        : "memory"                                      \
 631                );                                                      \
 632                break;                                                  \
 633        }                                                               \
 634        default:                                                        \
 635                __cmpxchg_wrong_size();                                 \
 636        }                                                               \
 637        __uaccess_end();                                                \
 638        *__uval = __old;                                                \
 639        __ret;                                                          \
 640})
 641
 642#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)               \
 643({                                                                      \
 644        access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?                \
 645                __user_atomic_cmpxchg_inatomic((uval), (ptr),           \
 646                                (old), (new), sizeof(*(ptr))) :         \
 647                -EFAULT;                                                \
 648})
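
/*
 * Illustrative futex-style use of user_atomic_cmpxchg_inatomic() (a
 * sketch; the helper name is made up): the word at @uaddr is replaced
 * by @desired only if it still holds @expected.  The return value is
 * 0 when the access itself succeeded (-EFAULT otherwise); whether the
 * exchange happened is decided by comparing *@actual with @expected.
 */
static inline int demo_user_cmpxchg(unsigned int __user *uaddr,
                                    unsigned int expected,
                                    unsigned int desired,
                                    unsigned int *actual)
{
        return user_atomic_cmpxchg_inatomic(actual, uaddr, expected, desired);
}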
 649
 650/*
 651 * movsl can be slow when source and dest are not both 8-byte aligned
 652 */
 653#ifdef CONFIG_X86_INTEL_USERCOPY
 654extern struct movsl_mask {
 655        int mask;
 656} ____cacheline_aligned_in_smp movsl_mask;
 657#endif
 658
 659#define ARCH_HAS_NOCACHE_UACCESS 1
 660
 661#ifdef CONFIG_X86_32
 662# include <asm/uaccess_32.h>
 663#else
 664# include <asm/uaccess_64.h>
 665#endif
 666
 667unsigned long __must_check _copy_from_user(void *to, const void __user *from,
 668                                           unsigned n);
 669unsigned long __must_check _copy_to_user(void __user *to, const void *from,
 670                                         unsigned n);
 671
 672#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
 673# define copy_user_diag __compiletime_error
 674#else
 675# define copy_user_diag __compiletime_warning
 676#endif
 677
 678extern void copy_user_diag("copy_from_user() buffer size is too small")
 679copy_from_user_overflow(void);
 680extern void copy_user_diag("copy_to_user() buffer size is too small")
 681copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
 682
 683#undef copy_user_diag
 684
 685#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
 686
 687extern void
 688__compiletime_warning("copy_from_user() buffer size is not provably correct")
 689__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
 690#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
 691
 692extern void
 693__compiletime_warning("copy_to_user() buffer size is not provably correct")
 694__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
 695#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
 696
 697#else
 698
 699static inline void
 700__copy_from_user_overflow(int size, unsigned long count)
 701{
 702        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
 703}
 704
 705#define __copy_to_user_overflow __copy_from_user_overflow
 706
 707#endif
 708
 709static inline unsigned long __must_check
 710copy_from_user(void *to, const void __user *from, unsigned long n)
 711{
 712        int sz = __compiletime_object_size(to);
 713
 714        might_fault();
 715
 716        /*
 717         * While we would like to have the compiler do the checking for us
 718         * even in the non-constant size case, any false positives there are
 719         * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
 720         * without - the [hopefully] dangerous looking nature of the warning
  721 * would make people go look at the respective call sites over and
 722         * over again just to find that there's no problem).
 723         *
 724         * And there are cases where it's just not realistic for the compiler
 725         * to prove the count to be in range. For example when multiple call
 726         * sites of a helper function - perhaps in different source files -
 727         * all doing proper range checking, yet the helper function not doing
 728         * so again.
 729         *
 730         * Therefore limit the compile time checking to the constant size
 731         * case, and do only runtime checking for non-constant sizes.
 732         */
 733
 734        if (likely(sz < 0 || sz >= n))
 735                n = _copy_from_user(to, from, n);
  736        else if (__builtin_constant_p(n))
 737                copy_from_user_overflow();
 738        else
 739                __copy_from_user_overflow(sz, n);
 740
 741        return n;
 742}
 743
 744static inline unsigned long __must_check
 745copy_to_user(void __user *to, const void *from, unsigned long n)
 746{
 747        int sz = __compiletime_object_size(from);
 748
 749        might_fault();
 750
 751        /* See the comment in copy_from_user() above. */
 752        if (likely(sz < 0 || sz >= n))
 753                n = _copy_to_user(to, from, n);
  754        else if (__builtin_constant_p(n))
 755                copy_to_user_overflow();
 756        else
 757                __copy_to_user_overflow(sz, n);
 758
 759        return n;
 760}
 761
 762#undef __copy_from_user_overflow
 763#undef __copy_to_user_overflow
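
/*
 * Illustrative copy_from_user()/copy_to_user() round trip (a sketch;
 * "struct demo_args" and the handler name are made up): whole argument
 * blocks are copied with the checked bulk helpers, which return the
 * number of bytes that could NOT be copied (0 on success).
 */
struct demo_args {
        unsigned int in;
        unsigned int out;
};

static inline long demo_handler(void __user *uarg)
{
        struct demo_args args;

        if (copy_from_user(&args, uarg, sizeof(args)))
                return -EFAULT;

        args.out = args.in + 1;         /* some kernel-side processing */

        if (copy_to_user(uarg, &args, sizeof(args)))
                return -EFAULT;
        return 0;
}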
 764
 765/*
 766 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 767 * nested NMI paths are careful to preserve CR2.
 768 *
 769 * Caller must use pagefault_enable/disable, or run in interrupt context,
  770 * and also do an access_ok() check
 771 */
 772#define __copy_from_user_nmi __copy_from_user_inatomic
 773
 774/*
 775 * The "unsafe" user accesses aren't really "unsafe", but the naming
 776 * is a big fat warning: you have to not only do the access_ok()
 777 * checking before using them, but you have to surround them with the
 778 * user_access_begin/end() pair.
 779 */
 780#define user_access_begin()     __uaccess_begin()
 781#define user_access_end()       __uaccess_end()
 782
 783#define unsafe_put_user(x, ptr)                                         \
 784({                                                                              \
 785        int __pu_err;                                                           \
 786        __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);         \
 787        __builtin_expect(__pu_err, 0);                                          \
 788})
 789
 790#define unsafe_get_user(x, ptr)                                         \
 791({                                                                              \
 792        int __gu_err;                                                           \
 793        unsigned long __gu_val;                                                 \
 794        __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);    \
 795        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
 796        __builtin_expect(__gu_err, 0);                                          \
 797})
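
/*
 * Illustrative use of the unsafe accessors (a sketch; the helper name
 * is made up): access_ok() and user_access_begin()/user_access_end()
 * bracket the accesses, exactly as the comment above requires.
 */
static inline int demo_copy_two_words(unsigned long __user *dst,
                                      const unsigned long __user *src)
{
        unsigned long v0, v1;
        int err = -EFAULT;

        if (!access_ok(VERIFY_READ, src, 2 * sizeof(unsigned long)) ||
            !access_ok(VERIFY_WRITE, dst, 2 * sizeof(unsigned long)))
                return -EFAULT;

        user_access_begin();
        if (unsafe_get_user(v0, src) || unsafe_get_user(v1, src + 1))
                goto out;
        if (unsafe_put_user(v0, dst) || unsafe_put_user(v1, dst + 1))
                goto out;
        err = 0;
out:
        user_access_end();
        return err;
}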
 798
 799#endif /* _ASM_X86_UACCESS_H */