linux/arch/mips/include/asm/uaccess.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm/asm-eva.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR       ".word"
#define __UA_LA         "la"
#define __UA_ADDU       "addu"
#define __UA_t0         "$8"
#define __UA_t1         "$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT      __ua_limit

#define __UA_ADDR       ".dword"
#define __UA_LA         "dla"
#define __UA_ADDU       "daddu"
#define __UA_t0         "$12"
#define __UA_t1         "$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS       ((mm_segment_t) { 0x80000000UL })
#define USER_DS         ((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS       ((mm_segment_t) { 0UL })
#define USER_DS         ((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)


/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)                                                 \
        ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

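/*
 * A worked example of the trick above: for a constant size such as
 * sizeof(int), __ua_size() folds to 0 at compile time and vanishes from
 * the OR inside __access_ok() below, while a runtime size is passed
 * through unchanged so a huge (sign-bit-set) size still trips the mask.
 */
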
/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)                                   \
({                                                                      \
        unsigned long __addr = (unsigned long) (addr);                  \
        unsigned long __size = size;                                    \
        unsigned long __mask = mask;                                    \
        unsigned long __ok;                                             \
                                                                        \
        __chk_user_ptr(addr);                                           \
        __ok = (signed long)(__mask & (__addr | (__addr + __size) |     \
                __ua_size(__size)));                                    \
        __ok == 0;                                                      \
})

#define access_ok(type, addr, size)                                     \
        likely(__access_ok((addr), (size), __access_mask))

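/*
 * Illustrative usage sketch, not part of this header; the function and
 * variable names are hypothetical.  One access_ok() check covers a whole
 * user buffer before the unchecked accessors below are used:
 *
 *	static int validate_args(const int __user *uargs, size_t count)
 *	{
 *		if (!access_ok(VERIFY_READ, uargs, count * sizeof(int)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
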
/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
        __put_user_check((x), (ptr), sizeof(*(ptr)))

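/*
 * Illustrative usage sketch, not part of this header; names are
 * hypothetical.  put_user() performs its own access_ok() check, so the
 * result only needs to be propagated:
 *
 *	static int report_status(int __user *statusp, int status)
 *	{
 *		return put_user(status, statusp);
 *	}
 */
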
/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))

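/*
 * Illustrative usage sketch, not part of this header; names are
 * hypothetical.  On failure the destination is zeroed and -EFAULT is
 * returned:
 *
 *	static int fetch_request(const int __user *uptr, int *req)
 *	{
 *		return get_user(*req, uptr);
 *	}
 */
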
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
        __put_user_nocheck((x), (ptr), sizeof(*(ptr)))

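/*
 * Illustrative usage sketch, not part of this header; names are
 * hypothetical.  One access_ok() check amortised over several unchecked
 * __put_user() calls:
 *
 *	static int export_pair(int __user *dst, int a, int b)
 *	{
 *		if (!access_ok(VERIFY_WRITE, dst, 2 * sizeof(int)))
 *			return -EFAULT;
 *		if (__put_user(a, dst) || __put_user(b, dst + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
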
/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

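/*
 * Illustrative usage sketch, not part of this header; names are
 * hypothetical.  As with __put_user(), the caller supplies the
 * access_ok() check:
 *
 *	static int import_pair(const int __user *src, int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, src, 2 * sizeof(int)))
 *			return -EFAULT;
 *		if (__get_user(*a, src) || __get_user(*b, src + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
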
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel specific functions for EVA. We need to use normal load instructions
 * to read data from kernel when operating in EVA mode. We use these macros to
 * avoid redefining __get_data_asm for EVA.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd                  _loadw
#else
#define _loadd(reg, addr)       "ld " reg ", " addr
#endif
#define _loadw(reg, addr)       "lw " reg ", " addr
#define _loadh(reg, addr)       "lh " reg ", " addr
#define _loadb(reg, addr)       "lb " reg ", " addr

#define __get_kernel_common(val, size, ptr)                             \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_data_asm(val, _loadb, ptr); break;                \
        case 2: __get_data_asm(val, _loadh, ptr); break;                \
        case 4: __get_data_asm(val, _loadw, ptr); break;                \
        case 8: __GET_DW(val, _loadd, ptr); break;                      \
        default: __get_user_unknown(); break;                           \
        }                                                               \
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)                               \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_data_asm(val, user_lb, ptr); break;               \
        case 2: __get_data_asm(val, user_lh, ptr); break;               \
        case 4: __get_data_asm(val, user_lw, ptr); break;               \
        case 8: __GET_DW(val, user_ld, ptr); break;                     \
        default: __get_user_unknown(); break;                           \
        }                                                               \
} while (0)

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
                                                                        \
        if (segment_eq(get_fs(), get_ds())) {                           \
                __get_kernel_common((x), size, ptr);                    \
        } else {                                                        \
                __chk_user_ptr(ptr);                                    \
                __get_user_common((x), size, ptr);                      \
        }                                                               \
        __gu_err;                                                       \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        int __gu_err = -EFAULT;                                         \
        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {          \
                if (segment_eq(get_fs(), get_ds()))                     \
                        __get_kernel_common((x), size, __gu_ptr);       \
                else                                                    \
                        __get_user_common((x), size, __gu_ptr);         \
        } else                                                          \
                (x) = 0;                                                \
                                                                        \
        __gu_err;                                                       \
})

#define __get_data_asm(val, insn, addr)                                 \
{                                                                       \
        long __gu_tmp;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     "insn("%1", "%3")"                              \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
        "       "__UA_ADDR "\t1b, 3b                            \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=r" (__gu_tmp)                              \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
                                                                        \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)                            \
{                                                                       \
        union {                                                         \
                unsigned long long      l;                              \
                __typeof__(*(addr))     t;                              \
        } __gu_tmp;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn("%1", "(%3)")"                           \n"     \
        "2:     " insn("%D1", "4(%3)")"                         \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=&r" (__gu_tmp.l)                           \
        : "0" (0), "r" (addr), "i" (-EFAULT));                          \
                                                                        \
        (val) = __gu_tmp.t;                                             \
}

#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel specific functions for EVA. We need to use normal store instructions
 * to write data to kernel when operating in EVA mode. We use these macros to
 * avoid redefining __put_data_asm for EVA.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored                 _storew
#else
#define _stored(reg, addr)      "sd " reg ", " addr
#endif

#define _storew(reg, addr)      "sw " reg ", " addr
#define _storeh(reg, addr)      "sh " reg ", " addr
#define _storeb(reg, addr)      "sb " reg ", " addr

#define __put_kernel_common(ptr, size)                                  \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __put_data_asm(_storeb, ptr); break;                    \
        case 2: __put_data_asm(_storeh, ptr); break;                    \
        case 4: __put_data_asm(_storew, ptr); break;                    \
        case 8: __PUT_DW(_stored, ptr); break;                          \
        default: __put_user_unknown(); break;                           \
        }                                                               \
} while (0)
#endif

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size)                                    \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __put_data_asm(user_sb, ptr); break;                    \
        case 2: __put_data_asm(user_sh, ptr); break;                    \
        case 4: __put_data_asm(user_sw, ptr); break;                    \
        case 8: __PUT_DW(user_sd, ptr); break;                          \
        default: __put_user_unknown(); break;                           \
        }                                                               \
} while (0)

#define __put_user_nocheck(x, ptr, size)                                \
({                                                                      \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
        __pu_val = (x);                                                 \
        if (segment_eq(get_fs(), get_ds())) {                           \
                __put_kernel_common(ptr, size);                         \
        } else {                                                        \
                __chk_user_ptr(ptr);                                    \
                __put_user_common(ptr, size);                           \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        int __pu_err = -EFAULT;                                         \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {        \
                if (segment_eq(get_fs(), get_ds()))                     \
                        __put_kernel_common(__pu_addr, size);           \
                else                                                    \
                        __put_user_common(__pu_addr, size);             \
        }                                                               \
                                                                        \
        __pu_err;                                                       \
})

#define __put_data_asm(insn, ptr)                                       \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     "insn("%z2", "%3")"     # __put_data_asm        \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 3b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__pu_err)                                               \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
          "i" (-EFAULT));                                               \
}

#define __put_data_asm_ll32(insn, ptr)                                  \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     "insn("%2", "(%3)")"    # __put_data_asm_ll32   \n"     \
        "2:     "insn("%D2", "4(%3)")"                          \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous"                                              \
        : "=r" (__pu_err)                                               \
        : "0" (0), "r" (__pu_val), "r" (ptr),                           \
          "i" (-EFAULT));                                               \
}

extern void __put_user_unknown(void);

/*
 * ul{h,w,d} are assembler macros and there are no equivalent macros for EVA.
 * EVA unaligned access is handled in the ADE exception handler.
 */
#ifndef CONFIG_EVA
/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr)       \
        __put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

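/*
 * Illustrative usage sketch, not part of this header; names are
 * hypothetical.  Useful when the destination may not be naturally
 * aligned, e.g. a field inside a packed user-space record:
 *
 *	static int store_len(void __user *rec, unsigned int off, u32 len)
 *	{
 *		u32 __user *p = (u32 __user *)((char __user *)rec + off);
 *
 *		return put_user_unaligned(len, p);
 *	}
 */
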
/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr) \
        __get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

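/*
 * Illustrative usage sketch, not part of this header; names are
 * hypothetical.  The mirror image of the sketch above, for loads:
 *
 *	static int load_len(const void __user *rec, unsigned int off, u32 *len)
 *	{
 *		const u32 __user *p =
 *			(const u32 __user *)((const char __user *)rec + off);
 *
 *		return get_user_unaligned(*len, p);
 *	}
 */
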
/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr) \
        __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
        __get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)                               \
        __get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)                               \
        __get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr)                     \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_data_asm(val, user_lb, ptr); break;               \
        case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;       \
        case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;       \
        case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;               \
        default: __get_user_unaligned_unknown(); break;                 \
        }                                                               \
} while (0)

#define __get_user_unaligned_nocheck(x,ptr,size)                        \
({                                                                      \
        int __gu_err;                                                   \
                                                                        \
        __get_user_unaligned_common((x), size, ptr);                    \
        __gu_err;                                                       \
})

#define __get_user_unaligned_check(x,ptr,size)                          \
({                                                                      \
        int __gu_err = -EFAULT;                                         \
        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
                                                                        \
        if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))            \
                __get_user_unaligned_common((x), size, __gu_ptr);       \
                                                                        \
        __gu_err;                                                       \
})

#define __get_user_unaligned_asm(val, insn, addr)                       \
{                                                                       \
        long __gu_tmp;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %1, %3                          \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
        "       "__UA_ADDR "\t1b, 3b                            \n"     \
        "       "__UA_ADDR "\t1b + 4, 3b                        \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=r" (__gu_tmp)                              \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
                                                                        \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr)                        \
{                                                                       \
        unsigned long long __gu_tmp;                                    \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     ulw     %1, (%3)                                \n"     \
        "2:     ulw     %D1, 4(%3)                              \n"     \
        "       move    %0, $0                                  \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=&r" (__gu_tmp)                             \
        : "0" (0), "r" (addr), "i" (-EFAULT));                          \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_common(ptr, size)                          \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __put_data_asm(user_sb, ptr); break;                    \
        case 2: __put_user_unaligned_asm("ush", ptr); break;            \
        case 4: __put_user_unaligned_asm("usw", ptr); break;            \
        case 8: __PUT_USER_UNALIGNED_DW(ptr); break;                    \
        default: __put_user_unaligned_unknown(); break;                 \
        }                                                               \
} while (0)

#define __put_user_unaligned_nocheck(x,ptr,size)                        \
({                                                                      \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
        __pu_val = (x);                                                 \
        __put_user_unaligned_common(ptr, size);                         \
        __pu_err;                                                       \
})

#define __put_user_unaligned_check(x,ptr,size)                          \
({                                                                      \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        int __pu_err = -EFAULT;                                         \
                                                                        \
        if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))          \
                __put_user_unaligned_common(__pu_addr, size);           \
                                                                        \
        __pu_err;                                                       \
})

#define __put_user_unaligned_asm(insn, ptr)                             \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %z2, %3         # __put_user_unaligned_asm\n" \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 3b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__pu_err)                                               \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
          "i" (-EFAULT));                                               \
}

#define __put_user_unaligned_asm_ll32(ptr)                              \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     usw     %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
        "2:     usw     %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
        "       .previous"                                              \
        : "=r" (__pu_err)                                               \
        : "0" (0), "r" (__pu_val), "r" (ptr),                           \
          "i" (-EFAULT));                                               \
}

extern void __put_user_unaligned_unknown(void);
#endif

/*
 * We're generating jumps to subroutines which may be outside the range of
 * direct jump instructions, so modules must go through a register.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_LA "\t$1, " #destination "\n\t"                            \
        "jalr\t$1\n\t"                                                  \
        ".set\tat\n\t"
#else
#define __MODULE_JAL(destination)                                       \
        "jal\t" #destination "\n\t"
#endif

#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&    \
                                              defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#ifndef CONFIG_EVA
#define __invoke_copy_to_user(to, from, n)                              \
({                                                                      \
        register void __user *__cu_to_r __asm__("$4");                  \
        register const void *__cu_from_r __asm__("$5");                 \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        __MODULE_JAL(__copy_user)                                       \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_to_kernel(to, from, n)                            \
        __invoke_copy_to_user(to, from, n)

#endif

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n)                                     \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        might_fault();                                                  \
        if (segment_eq(get_fs(), get_ds()))                             \
                __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
                                                   __cu_len);           \
        else                                                            \
                __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
                                                 __cu_len);             \
        __cu_len;                                                       \
})

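/*
 * Illustrative usage sketch, not part of this header; names are
 * hypothetical.  The caller validates the destination once, and a
 * nonzero return (bytes left uncopied) is turned into -EFAULT:
 *
 *	static int push_record(void __user *dst, const void *rec, size_t len)
 *	{
 *		if (!access_ok(VERIFY_WRITE, dst, len))
 *			return -EFAULT;
 *		return __copy_to_user(dst, rec, len) ? -EFAULT : 0;
 *	}
 */
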
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n)                            \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (segment_eq(get_fs(), get_ds()))                             \
                __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
                                                   __cu_len);           \
        else                                                            \
                __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
                                                 __cu_len);             \
        __cu_len;                                                       \
})

#define __copy_from_user_inatomic(to, from, n)                          \
({                                                                      \
        void *__cu_to;                                                  \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (segment_eq(get_fs(), get_ds()))                             \
                __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,  \
                                                              __cu_from,\
                                                              __cu_len);\
        else                                                            \
                __cu_len = __invoke_copy_from_user_inatomic(__cu_to,    \
                                                            __cu_from,  \
                                                            __cu_len);  \
        __cu_len;                                                       \
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n)                                       \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (segment_eq(get_fs(), get_ds())) {                           \
                __cu_len = __invoke_copy_to_kernel(__cu_to,             \
                                                   __cu_from,           \
                                                   __cu_len);           \
        } else {                                                        \
                if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {       \
                        might_fault();                                  \
                        __cu_len = __invoke_copy_to_user(__cu_to,       \
                                                         __cu_from,     \
                                                         __cu_len);     \
                }                                                       \
        }                                                               \
        __cu_len;                                                       \
})

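/*
 * Illustrative usage sketch, not part of this header; names are
 * hypothetical.  copy_to_user() performs its own access_ok() check:
 *
 *	static long read_stats(char __user *buf, const char *kbuf, size_t len)
 *	{
 *		if (copy_to_user(buf, kbuf, len))
 *			return -EFAULT;
 *		return len;
 *	}
 */
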
#ifndef CONFIG_EVA

#define __invoke_copy_from_user(to, from, n)                            \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(__copy_user)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_from_kernel(to, from, n)                          \
        __invoke_copy_from_user(to, from, n)

/* For userland <-> userland operations */
#define ___invoke_copy_in_user(to, from, n)                             \
        __invoke_copy_from_user(to, from, n)

/* For kernel <-> kernel operations */
#define ___invoke_copy_in_kernel(to, from, n)                           \
        __invoke_copy_from_user(to, from, n)

#define __invoke_copy_from_user_inatomic(to, from, n)                   \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(__copy_user_inatomic)                              \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_from_kernel_inatomic(to, from, n)                 \
        __invoke_copy_from_user_inatomic(to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
                                       size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from,
                                   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
                                 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)      \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(func_ptr)                                          \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)        \
({                                                                      \
        register void __user *__cu_to_r __asm__("$4");                  \
        register const void *__cu_from_r __asm__("$5");                 \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        __MODULE_JAL(func_ptr)                                          \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})


/*
 * Source or destination address is in userland. We need to go through
 * the TLB
 */
#define __invoke_copy_from_user(to, from, n)                            \
        __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)

#define __invoke_copy_from_user_inatomic(to, from, n)                   \
        __invoke_copy_from_user_eva_generic(to, from, n,                \
                                            __copy_user_inatomic_eva)

#define __invoke_copy_to_user(to, from, n)                              \
        __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)

#define ___invoke_copy_in_user(to, from, n)                             \
        __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)

/*
 * Source or destination address in the kernel. We are not going through
 * the TLB
 */
#define __invoke_copy_from_kernel(to, from, n)                          \
        __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#define __invoke_copy_from_kernel_inatomic(to, from, n)                 \
        __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)

#define __invoke_copy_to_kernel(to, from, n)                            \
        __invoke_copy_to_user_eva_generic(to, from, n, __copy_user)

#define ___invoke_copy_in_kernel(to, from, n)                           \
        __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#endif /* CONFIG_EVA */

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n)                                   \
({                                                                      \
        void *__cu_to;                                                  \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        might_fault();                                                  \
        __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,          \
                                           __cu_len);                   \
        __cu_len;                                                       \
})
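
/*
 * Illustrative use of __copy_from_user() (a sketch, not part of the
 * original header; "struct foo_args", "args" and "uarg" are
 * hypothetical names):
 *
 *	struct foo_args args;
 *
 *	if (!access_ok(VERIFY_READ, uarg, sizeof(args)))
 *		return -EFAULT;
 *	if (__copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 */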

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n)                                     \
({                                                                      \
        void *__cu_to;                                                  \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (segment_eq(get_fs(), get_ds())) {                           \
                __cu_len = __invoke_copy_from_kernel(__cu_to,           \
                                                     __cu_from,         \
                                                     __cu_len);         \
        } else {                                                        \
                if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {      \
                        might_fault();                                  \
                        __cu_len = __invoke_copy_from_user(__cu_to,     \
                                                           __cu_from,   \
                                                           __cu_len);   \
                }                                                       \
        }                                                               \
        __cu_len;                                                       \
})
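
/*
 * Typical use of copy_from_user() (a sketch; "struct foo_args", "args"
 * and "uarg" are hypothetical names).  The access_ok() check is done
 * internally, and the non-zero "bytes not copied" return value is
 * conventionally folded into -EFAULT by the caller:
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 */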

#define __copy_in_user(to, from, n)                                     \
({                                                                      \
        void __user *__cu_to;                                           \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (segment_eq(get_fs(), get_ds())) {                           \
                __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
                                                    __cu_len);          \
        } else {                                                        \
                might_fault();                                          \
                __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,   \
                                                  __cu_len);            \
        }                                                               \
        __cu_len;                                                       \
})

#define copy_in_user(to, from, n)                                       \
({                                                                      \
        void __user *__cu_to;                                           \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (segment_eq(get_fs(), get_ds())) {                           \
                __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
                                                    __cu_len);          \
        } else {                                                        \
                if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
                           access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
                        might_fault();                                  \
                        __cu_len = ___invoke_copy_in_user(__cu_to,      \
                                                          __cu_from,    \
                                                          __cu_len);    \
                }                                                       \
        }                                                               \
        __cu_len;                                                       \
})
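
/*
 * Illustrative use of copy_in_user() for a user-to-user copy, e.g. when
 * relaying a buffer between two user pointers in a compat layer (a
 * sketch; "udst", "usrc" and "len" are hypothetical names):
 *
 *	if (copy_in_user(udst, usrc, len))
 *		return -EFAULT;
 */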

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Context: User context only.  This function may sleep.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
        __kernel_size_t res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"      /* $4: destination address */
                "move\t$5, $0\n\t"      /* $5: fill byte (zero) */
                "move\t$6, %2\n\t"      /* $6: byte count */
                __MODULE_JAL(__bzero)
                "move\t%0, $6"          /* residual count left in $6 */
                : "=r" (res)
                : "r" (addr), "r" (size)
                : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

        return res;
}

#define clear_user(addr, n)                                             \
({                                                                      \
        void __user *__cl_addr = (addr);                                \
        unsigned long __cl_size = (n);                                  \
        if (__cl_size && access_ok(VERIFY_WRITE,                        \
                                   __cl_addr, __cl_size))               \
                __cl_size = __clear_user(__cl_addr, __cl_size);         \
        __cl_size;                                                      \
})
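
/*
 * Illustrative use of clear_user(), e.g. to zero the unwritten tail of
 * a partially filled user buffer (a sketch; "ubuf", "filled" and "size"
 * are hypothetical names):
 *
 *	if (clear_user(ubuf + filled, size - filled))
 *		return -EFAULT;
 */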

/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @__to:   Destination address, in kernel space.  This buffer must be at
 *          least @__len bytes long.
 * @__from: Source address, in user space.
 * @__len:  Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @__len is smaller than the length of the string, copies @__len bytes
 * and returns @__len.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
        long res;

        if (segment_eq(get_fs(), get_ds())) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        "move\t$6, %3\n\t"
                        __MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (__to), "r" (__from), "r" (__len)
                        : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
        } else {
                might_fault();
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        "move\t$6, %3\n\t"
                        __MODULE_JAL(__strncpy_from_user_nocheck_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (__to), "r" (__from), "r" (__len)
                        : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
        }

        return res;
}
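
/*
 * Note (explanatory): unlike the block-copy invokers earlier in this
 * file, the string helpers marshal their arguments into $4-$6 with
 * explicit moves and read the result back from $2 ($v0), the normal
 * MIPS return-value register, hence the "move\t%0, $2" trailer in each
 * variant.
 */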

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @__to:   Destination address, in kernel space.  This buffer must be at
 *          least @__len bytes long.
 * @__from: Source address, in user space.
 * @__len:  Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @__len is smaller than the length of the string, copies @__len bytes
 * and returns @__len.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
        long res;

        if (segment_eq(get_fs(), get_ds())) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        "move\t$6, %3\n\t"
                        __MODULE_JAL(__strncpy_from_kernel_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (__to), "r" (__from), "r" (__len)
                        : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
        } else {
                might_fault();
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        "move\t$6, %3\n\t"
                        __MODULE_JAL(__strncpy_from_user_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (__to), "r" (__from), "r" (__len)
                        : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
        }

        return res;
}
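
/*
 * Illustrative use of strncpy_from_user() (a sketch; "name" and "uname"
 * are hypothetical names):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 *
 * A negative return is -EFAULT; a return equal to the buffer size means
 * no NUL was found within the given bound, i.e. the copy is not
 * NUL-terminated.
 */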

/*
 * strlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
        long res;

        if (segment_eq(get_fs(), get_ds())) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        __MODULE_JAL(__strlen_kernel_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (s)
                        : "$2", "$4", __UA_t0, "$31");
        } else {
                might_fault();
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        __MODULE_JAL(__strlen_user_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (s)
                        : "$2", "$4", __UA_t0, "$31");
        }

        return res;
}
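
/*
 * Illustrative use of strlen_user() (a sketch; "ustr" is a hypothetical
 * name):
 *
 *	long size = strlen_user(ustr);
 *
 *	if (!size)
 *		return -EFAULT;
 *
 * On success "size" counts the string bytes plus the terminating NUL;
 * a return of 0 means the access faulted.
 */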

/* Returns: 0 if bad, string length + 1 (i.e. its size in memory) if OK */
static inline long __strnlen_user(const char __user *s, long n)
{
        long res;

        if (segment_eq(get_fs(), get_ds())) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        __MODULE_JAL(__strnlen_kernel_nocheck_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (s), "r" (n)
                        : "$2", "$4", "$5", __UA_t0, "$31");
        } else {
                might_fault();
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        __MODULE_JAL(__strnlen_user_nocheck_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (s), "r" (n)
                        : "$2", "$4", "$5", __UA_t0, "$31");
        }

        return res;
}

/*
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
        long res;

        might_fault();
        if (segment_eq(get_fs(), get_ds())) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        __MODULE_JAL(__strnlen_kernel_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (s), "r" (n)
                        : "$2", "$4", "$5", __UA_t0, "$31");
        } else {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        __MODULE_JAL(__strnlen_user_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (s), "r" (n)
                        : "$2", "$4", "$5", __UA_t0, "$31");
        }

        return res;
}
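
/*
 * Illustrative use of strnlen_user() (a sketch; "ustr" and BUF_SIZE are
 * hypothetical names):
 *
 *	long size = strnlen_user(ustr, BUF_SIZE);
 *
 *	if (!size)
 *		return -EFAULT;
 *	if (size > BUF_SIZE)
 *		return -ENAMETOOLONG;
 *
 * On success "size" includes the terminating NUL, so it is a suitable
 * length for a subsequent copy of the whole string.
 */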

struct exception_table_entry {
        unsigned long insn;             /* address of faulting instruction */
        unsigned long nextinsn;         /* address to continue execution at */
};

extern int fixup_exception(struct pt_regs *regs);
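
/*
 * How the table is used (an explanatory sketch, not from the original
 * comments): each entry pairs the address of an instruction that may
 * fault on a user access (insn) with the address to resume at
 * (nextinsn).  When such an instruction faults, the fault handler calls
 * fixup_exception(), which searches __ex_table and, on a match,
 * rewrites the saved EPC so execution continues at the fixup code
 * instead of treating the fault as fatal.  Entries are emitted from
 * inline asm in the style used by the accessors in this file, roughly:
 *
 *	1:	lw	%1, (%3)		# may fault
 *	...
 *	.section __ex_table, "a"
 *	__UA_ADDR	1b, 3b			# insn, fixup target
 *	.previous
 *
 * where label 3 lives in the .fixup section and typically loads -EFAULT
 * into the error register before jumping back.
 */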

#endif /* _ASM_UACCESS_H */