linux/arch/mips/include/asm/uaccess.h
<<
>>
Prefs
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
   7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
   8 * Copyright (C) 2007  Maciej W. Rozycki
   9 * Copyright (C) 2014, Imagination Technologies Ltd.
  10 */
  11#ifndef _ASM_UACCESS_H
  12#define _ASM_UACCESS_H
  13
  14#include <linux/kernel.h>
  15#include <linux/string.h>
  16#include <asm/asm-eva.h>
  17#include <asm/extable.h>
  18
  19/*
  20 * The fs value determines whether argument validity checking should be
  21 * performed or not.  If get_fs() == USER_DS, checking is performed, with
  22 * get_fs() == KERNEL_DS, checking is bypassed.
  23 *
  24 * For historical reasons, these macros are grossly misnamed.
  25 */
  26#ifdef CONFIG_32BIT
  27
  28#ifdef CONFIG_KVM_GUEST
  29#define __UA_LIMIT 0x40000000UL
  30#else
  31#define __UA_LIMIT 0x80000000UL
  32#endif
  33
  34#define __UA_ADDR       ".word"
  35#define __UA_LA         "la"
  36#define __UA_ADDU       "addu"
  37#define __UA_t0         "$8"
  38#define __UA_t1         "$9"
  39
  40#endif /* CONFIG_32BIT */
  41
  42#ifdef CONFIG_64BIT
  43
  44extern u64 __ua_limit;
  45
  46#define __UA_LIMIT      __ua_limit
  47
  48#define __UA_ADDR       ".dword"
  49#define __UA_LA         "dla"
  50#define __UA_ADDU       "daddu"
  51#define __UA_t0         "$12"
  52#define __UA_t1         "$13"
  53
  54#endif /* CONFIG_64BIT */
  55
  56/*
  57 * USER_DS is a bitmask that has the bits set that may not be set in a valid
  58 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
  59 * the arithmetic we're doing only works if the limit is a power of two, so
  60 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
  61 * address in this range it's the process's problem, not ours :-)
  62 */
  63
  64#ifdef CONFIG_KVM_GUEST
  65#define KERNEL_DS       ((mm_segment_t) { 0x80000000UL })
  66#define USER_DS         ((mm_segment_t) { 0xC0000000UL })
  67#else
  68#define KERNEL_DS       ((mm_segment_t) { 0UL })
  69#define USER_DS         ((mm_segment_t) { __UA_LIMIT })
  70#endif
  71
  72#define get_ds()        (KERNEL_DS)
  73#define get_fs()        (current_thread_info()->addr_limit)
  74#define set_fs(x)       (current_thread_info()->addr_limit = (x))
  75
  76#define segment_eq(a, b)        ((a).seg == (b).seg)
  77
  78/*
  79 * eva_kernel_access() - determine whether kernel memory access on an EVA system
  80 *
  81 * Determines whether memory accesses should be performed to kernel memory
  82 * on a system using Extended Virtual Addressing (EVA).
  83 *
  84 * Return: true if a kernel memory access on an EVA system, else false.
  85 */
  86static inline bool eva_kernel_access(void)
  87{
  88        if (!IS_ENABLED(CONFIG_EVA))
  89                return false;
  90
  91        return uaccess_kernel();
  92}
  93
  94/*
  95 * Is a address valid? This does a straightforward calculation rather
  96 * than tests.
  97 *
  98 * Address valid if:
  99 *  - "addr" doesn't have any high-bits set
 100 *  - AND "size" doesn't have any high-bits set
 101 *  - AND "addr+size" doesn't have any high-bits set
 102 *  - OR we are in kernel mode.
 103 *
 104 * __ua_size() is a trick to avoid runtime checking of positive constant
 105 * sizes; for those we already know at compile time that the size is ok.
 106 */
 107#define __ua_size(size)                                                 \
 108        ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
 109
 110/*
 111 * access_ok: - Checks if a user space pointer is valid
 112 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 113 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 114 *        to write to a block, it is always safe to read from it.
 115 * @addr: User space pointer to start of block to check
 116 * @size: Size of block to check
 117 *
 118 * Context: User context only. This function may sleep if pagefaults are
 119 *          enabled.
 120 *
 121 * Checks if a pointer to a block of memory in user space is valid.
 122 *
 123 * Returns true (nonzero) if the memory block may be valid, false (zero)
 124 * if it is definitely invalid.
 125 *
 126 * Note that, depending on architecture, this function probably just
 127 * checks that the pointer is in the user space range - after calling
 128 * this function, memory access functions may still return -EFAULT.
 129 */
 130
 131static inline int __access_ok(const void __user *p, unsigned long size)
 132{
 133        unsigned long addr = (unsigned long)p;
 134        return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
 135}
 136
 137#define access_ok(type, addr, size)                                     \
 138        likely(__access_ok((addr), (size)))
 139
 140/*
 141 * put_user: - Write a simple value into user space.
 142 * @x:   Value to copy to user space.
 143 * @ptr: Destination address, in user space.
 144 *
 145 * Context: User context only. This function may sleep if pagefaults are
 146 *          enabled.
 147 *
 148 * This macro copies a single simple value from kernel space to user
 149 * space.  It supports simple types like char and int, but not larger
 150 * data types like structures or arrays.
 151 *
 152 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 153 * to the result of dereferencing @ptr.
 154 *
 155 * Returns zero on success, or -EFAULT on error.
 156 */
 157#define put_user(x,ptr) \
 158        __put_user_check((x), (ptr), sizeof(*(ptr)))
 159
 160/*
 161 * get_user: - Get a simple variable from user space.
 162 * @x:   Variable to store result.
 163 * @ptr: Source address, in user space.
 164 *
 165 * Context: User context only. This function may sleep if pagefaults are
 166 *          enabled.
 167 *
 168 * This macro copies a single simple variable from user space to kernel
 169 * space.  It supports simple types like char and int, but not larger
 170 * data types like structures or arrays.
 171 *
 172 * @ptr must have pointer-to-simple-variable type, and the result of
 173 * dereferencing @ptr must be assignable to @x without a cast.
 174 *
 175 * Returns zero on success, or -EFAULT on error.
 176 * On error, the variable @x is set to zero.
 177 */
 178#define get_user(x,ptr) \
 179        __get_user_check((x), (ptr), sizeof(*(ptr)))
 180
 181/*
 182 * __put_user: - Write a simple value into user space, with less checking.
 183 * @x:   Value to copy to user space.
 184 * @ptr: Destination address, in user space.
 185 *
 186 * Context: User context only. This function may sleep if pagefaults are
 187 *          enabled.
 188 *
 189 * This macro copies a single simple value from kernel space to user
 190 * space.  It supports simple types like char and int, but not larger
 191 * data types like structures or arrays.
 192 *
 193 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 194 * to the result of dereferencing @ptr.
 195 *
 196 * Caller must check the pointer with access_ok() before calling this
 197 * function.
 198 *
 199 * Returns zero on success, or -EFAULT on error.
 200 */
 201#define __put_user(x,ptr) \
 202        __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
 203
 204/*
 205 * __get_user: - Get a simple variable from user space, with less checking.
 206 * @x:   Variable to store result.
 207 * @ptr: Source address, in user space.
 208 *
 209 * Context: User context only. This function may sleep if pagefaults are
 210 *          enabled.
 211 *
 212 * This macro copies a single simple variable from user space to kernel
 213 * space.  It supports simple types like char and int, but not larger
 214 * data types like structures or arrays.
 215 *
 216 * @ptr must have pointer-to-simple-variable type, and the result of
 217 * dereferencing @ptr must be assignable to @x without a cast.
 218 *
 219 * Caller must check the pointer with access_ok() before calling this
 220 * function.
 221 *
 222 * Returns zero on success, or -EFAULT on error.
 223 * On error, the variable @x is set to zero.
 224 */
 225#define __get_user(x,ptr) \
 226        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
 227
 228struct __large_struct { unsigned long buf[100]; };
 229#define __m(x) (*(struct __large_struct __user *)(x))
 230
 231/*
 232 * Yuck.  We need two variants, one for 64bit operation and one
 233 * for 32 bit mode and old iron.
 234 */
 235#ifndef CONFIG_EVA
 236#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
 237#else
 238/*
 239 * Kernel specific functions for EVA. We need to use normal load instructions
 240 * to read data from kernel when operating in EVA mode. We use these macros to
 241 * avoid redefining __get_user_asm for EVA.
 242 */
 243#undef _loadd
 244#undef _loadw
 245#undef _loadh
 246#undef _loadb
 247#ifdef CONFIG_32BIT
 248#define _loadd                  _loadw
 249#else
 250#define _loadd(reg, addr)       "ld " reg ", " addr
 251#endif
 252#define _loadw(reg, addr)       "lw " reg ", " addr
 253#define _loadh(reg, addr)       "lh " reg ", " addr
 254#define _loadb(reg, addr)       "lb " reg ", " addr
 255
 256#define __get_kernel_common(val, size, ptr)                             \
 257do {                                                                    \
 258        switch (size) {                                                 \
 259        case 1: __get_data_asm(val, _loadb, ptr); break;                \
 260        case 2: __get_data_asm(val, _loadh, ptr); break;                \
 261        case 4: __get_data_asm(val, _loadw, ptr); break;                \
 262        case 8: __GET_DW(val, _loadd, ptr); break;                      \
 263        default: __get_user_unknown(); break;                           \
 264        }                                                               \
 265} while (0)
 266#endif
 267
 268#ifdef CONFIG_32BIT
 269#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
 270#endif
 271#ifdef CONFIG_64BIT
 272#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
 273#endif
 274
 275extern void __get_user_unknown(void);
 276
 277#define __get_user_common(val, size, ptr)                               \
 278do {                                                                    \
 279        switch (size) {                                                 \
 280        case 1: __get_data_asm(val, user_lb, ptr); break;               \
 281        case 2: __get_data_asm(val, user_lh, ptr); break;               \
 282        case 4: __get_data_asm(val, user_lw, ptr); break;               \
 283        case 8: __GET_DW(val, user_ld, ptr); break;                     \
 284        default: __get_user_unknown(); break;                           \
 285        }                                                               \
 286} while (0)
 287
 288#define __get_user_nocheck(x, ptr, size)                                \
 289({                                                                      \
 290        int __gu_err;                                                   \
 291                                                                        \
 292        if (eva_kernel_access()) {                                      \
 293                __get_kernel_common((x), size, ptr);                    \
 294        } else {                                                        \
 295                __chk_user_ptr(ptr);                                    \
 296                __get_user_common((x), size, ptr);                      \
 297        }                                                               \
 298        __gu_err;                                                       \
 299})
 300
 301#define __get_user_check(x, ptr, size)                                  \
 302({                                                                      \
 303        int __gu_err = -EFAULT;                                         \
 304        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
 305                                                                        \
 306        might_fault();                                                  \
 307        if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {          \
 308                if (eva_kernel_access())                                \
 309                        __get_kernel_common((x), size, __gu_ptr);       \
 310                else                                                    \
 311                        __get_user_common((x), size, __gu_ptr);         \
 312        } else                                                          \
 313                (x) = 0;                                                \
 314                                                                        \
 315        __gu_err;                                                       \
 316})
 317
 318#define __get_data_asm(val, insn, addr)                                 \
 319{                                                                       \
 320        long __gu_tmp;                                                  \
 321                                                                        \
 322        __asm__ __volatile__(                                           \
 323        "1:     "insn("%1", "%3")"                              \n"     \
 324        "2:                                                     \n"     \
 325        "       .insn                                           \n"     \
 326        "       .section .fixup,\"ax\"                          \n"     \
 327        "3:     li      %0, %4                                  \n"     \
 328        "       move    %1, $0                                  \n"     \
 329        "       j       2b                                      \n"     \
 330        "       .previous                                       \n"     \
 331        "       .section __ex_table,\"a\"                       \n"     \
 332        "       "__UA_ADDR "\t1b, 3b                            \n"     \
 333        "       .previous                                       \n"     \
 334        : "=r" (__gu_err), "=r" (__gu_tmp)                              \
 335        : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
 336                                                                        \
 337        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
 338}
 339
 340/*
 341 * Get a long long 64 using 32 bit registers.
 342 */
 343#define __get_data_asm_ll32(val, insn, addr)                            \
 344{                                                                       \
 345        union {                                                         \
 346                unsigned long long      l;                              \
 347                __typeof__(*(addr))     t;                              \
 348        } __gu_tmp;                                                     \
 349                                                                        \
 350        __asm__ __volatile__(                                           \
 351        "1:     " insn("%1", "(%3)")"                           \n"     \
 352        "2:     " insn("%D1", "4(%3)")"                         \n"     \
 353        "3:                                                     \n"     \
 354        "       .insn                                           \n"     \
 355        "       .section        .fixup,\"ax\"                   \n"     \
 356        "4:     li      %0, %4                                  \n"     \
 357        "       move    %1, $0                                  \n"     \
 358        "       move    %D1, $0                                 \n"     \
 359        "       j       3b                                      \n"     \
 360        "       .previous                                       \n"     \
 361        "       .section        __ex_table,\"a\"                \n"     \
 362        "       " __UA_ADDR "   1b, 4b                          \n"     \
 363        "       " __UA_ADDR "   2b, 4b                          \n"     \
 364        "       .previous                                       \n"     \
 365        : "=r" (__gu_err), "=&r" (__gu_tmp.l)                           \
 366        : "0" (0), "r" (addr), "i" (-EFAULT));                          \
 367                                                                        \
 368        (val) = __gu_tmp.t;                                             \
 369}
 370
 371#ifndef CONFIG_EVA
 372#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
 373#else
 374/*
 375 * Kernel specific functions for EVA. We need to use normal load instructions
 376 * to read data from kernel when operating in EVA mode. We use these macros to
 377 * avoid redefining __get_data_asm for EVA.
 378 */
 379#undef _stored
 380#undef _storew
 381#undef _storeh
 382#undef _storeb
 383#ifdef CONFIG_32BIT
 384#define _stored                 _storew
 385#else
 386#define _stored(reg, addr)      "ld " reg ", " addr
 387#endif
 388
 389#define _storew(reg, addr)      "sw " reg ", " addr
 390#define _storeh(reg, addr)      "sh " reg ", " addr
 391#define _storeb(reg, addr)      "sb " reg ", " addr
 392
 393#define __put_kernel_common(ptr, size)                                  \
 394do {                                                                    \
 395        switch (size) {                                                 \
 396        case 1: __put_data_asm(_storeb, ptr); break;                    \
 397        case 2: __put_data_asm(_storeh, ptr); break;                    \
 398        case 4: __put_data_asm(_storew, ptr); break;                    \
 399        case 8: __PUT_DW(_stored, ptr); break;                          \
 400        default: __put_user_unknown(); break;                           \
 401        }                                                               \
 402} while(0)
 403#endif
 404
 405/*
 406 * Yuck.  We need two variants, one for 64bit operation and one
 407 * for 32 bit mode and old iron.
 408 */
 409#ifdef CONFIG_32BIT
 410#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
 411#endif
 412#ifdef CONFIG_64BIT
 413#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
 414#endif
 415
 416#define __put_user_common(ptr, size)                                    \
 417do {                                                                    \
 418        switch (size) {                                                 \
 419        case 1: __put_data_asm(user_sb, ptr); break;                    \
 420        case 2: __put_data_asm(user_sh, ptr); break;                    \
 421        case 4: __put_data_asm(user_sw, ptr); break;                    \
 422        case 8: __PUT_DW(user_sd, ptr); break;                          \
 423        default: __put_user_unknown(); break;                           \
 424        }                                                               \
 425} while (0)
 426
 427#define __put_user_nocheck(x, ptr, size)                                \
 428({                                                                      \
 429        __typeof__(*(ptr)) __pu_val;                                    \
 430        int __pu_err = 0;                                               \
 431                                                                        \
 432        __pu_val = (x);                                                 \
 433        if (eva_kernel_access()) {                                      \
 434                __put_kernel_common(ptr, size);                         \
 435        } else {                                                        \
 436                __chk_user_ptr(ptr);                                    \
 437                __put_user_common(ptr, size);                           \
 438        }                                                               \
 439        __pu_err;                                                       \
 440})
 441
 442#define __put_user_check(x, ptr, size)                                  \
 443({                                                                      \
 444        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
 445        __typeof__(*(ptr)) __pu_val = (x);                              \
 446        int __pu_err = -EFAULT;                                         \
 447                                                                        \
 448        might_fault();                                                  \
 449        if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {        \
 450                if (eva_kernel_access())                                \
 451                        __put_kernel_common(__pu_addr, size);           \
 452                else                                                    \
 453                        __put_user_common(__pu_addr, size);             \
 454        }                                                               \
 455                                                                        \
 456        __pu_err;                                                       \
 457})
 458
 459#define __put_data_asm(insn, ptr)                                       \
 460{                                                                       \
 461        __asm__ __volatile__(                                           \
 462        "1:     "insn("%z2", "%3")"     # __put_data_asm        \n"     \
 463        "2:                                                     \n"     \
 464        "       .insn                                           \n"     \
 465        "       .section        .fixup,\"ax\"                   \n"     \
 466        "3:     li      %0, %4                                  \n"     \
 467        "       j       2b                                      \n"     \
 468        "       .previous                                       \n"     \
 469        "       .section        __ex_table,\"a\"                \n"     \
 470        "       " __UA_ADDR "   1b, 3b                          \n"     \
 471        "       .previous                                       \n"     \
 472        : "=r" (__pu_err)                                               \
 473        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
 474          "i" (-EFAULT));                                               \
 475}
 476
 477#define __put_data_asm_ll32(insn, ptr)                                  \
 478{                                                                       \
 479        __asm__ __volatile__(                                           \
 480        "1:     "insn("%2", "(%3)")"    # __put_data_asm_ll32   \n"     \
 481        "2:     "insn("%D2", "4(%3)")"                          \n"     \
 482        "3:                                                     \n"     \
 483        "       .insn                                           \n"     \
 484        "       .section        .fixup,\"ax\"                   \n"     \
 485        "4:     li      %0, %4                                  \n"     \
 486        "       j       3b                                      \n"     \
 487        "       .previous                                       \n"     \
 488        "       .section        __ex_table,\"a\"                \n"     \
 489        "       " __UA_ADDR "   1b, 4b                          \n"     \
 490        "       " __UA_ADDR "   2b, 4b                          \n"     \
 491        "       .previous"                                              \
 492        : "=r" (__pu_err)                                               \
 493        : "0" (0), "r" (__pu_val), "r" (ptr),                           \
 494          "i" (-EFAULT));                                               \
 495}
 496
 497extern void __put_user_unknown(void);
 498
 499/*
 500 * We're generating jump to subroutines which will be outside the range of
 501 * jump instructions
 502 */
 503#ifdef MODULE
 504#define __MODULE_JAL(destination)                                       \
 505        ".set\tnoat\n\t"                                                \
 506        __UA_LA "\t$1, " #destination "\n\t"                            \
 507        "jalr\t$1\n\t"                                                  \
 508        ".set\tat\n\t"
 509#else
 510#define __MODULE_JAL(destination)                                       \
 511        "jal\t" #destination "\n\t"
 512#endif
 513
 514#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&    \
 515                                              defined(CONFIG_CPU_HAS_PREFETCH))
 516#define DADDI_SCRATCH "$3"
 517#else
 518#define DADDI_SCRATCH "$0"
 519#endif
 520
 521extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 522
 523#define __invoke_copy_from(func, to, from, n)                           \
 524({                                                                      \
 525        register void *__cu_to_r __asm__("$4");                         \
 526        register const void __user *__cu_from_r __asm__("$5");          \
 527        register long __cu_len_r __asm__("$6");                         \
 528                                                                        \
 529        __cu_to_r = (to);                                               \
 530        __cu_from_r = (from);                                           \
 531        __cu_len_r = (n);                                               \
 532        __asm__ __volatile__(                                           \
 533        ".set\tnoreorder\n\t"                                           \
 534        __MODULE_JAL(func)                                              \
 535        ".set\tnoat\n\t"                                                \
 536        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
 537        ".set\tat\n\t"                                                  \
 538        ".set\treorder"                                                 \
 539        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
 540        :                                                               \
 541        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
 542          DADDI_SCRATCH, "memory");                                     \
 543        __cu_len_r;                                                     \
 544})
 545
 546#define __invoke_copy_to(func, to, from, n)                             \
 547({                                                                      \
 548        register void __user *__cu_to_r __asm__("$4");                  \
 549        register const void *__cu_from_r __asm__("$5");                 \
 550        register long __cu_len_r __asm__("$6");                         \
 551                                                                        \
 552        __cu_to_r = (to);                                               \
 553        __cu_from_r = (from);                                           \
 554        __cu_len_r = (n);                                               \
 555        __asm__ __volatile__(                                           \
 556        __MODULE_JAL(func)                                              \
 557        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
 558        :                                                               \
 559        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
 560          DADDI_SCRATCH, "memory");                                     \
 561        __cu_len_r;                                                     \
 562})
 563
 564#define __invoke_copy_from_kernel(to, from, n)                          \
 565        __invoke_copy_from(__copy_user, to, from, n)
 566
 567#define __invoke_copy_to_kernel(to, from, n)                            \
 568        __invoke_copy_to(__copy_user, to, from, n)
 569
 570#define ___invoke_copy_in_kernel(to, from, n)                           \
 571        __invoke_copy_from(__copy_user, to, from, n)
 572
 573#ifndef CONFIG_EVA
 574#define __invoke_copy_from_user(to, from, n)                            \
 575        __invoke_copy_from(__copy_user, to, from, n)
 576
 577#define __invoke_copy_to_user(to, from, n)                              \
 578        __invoke_copy_to(__copy_user, to, from, n)
 579
 580#define ___invoke_copy_in_user(to, from, n)                             \
 581        __invoke_copy_from(__copy_user, to, from, n)
 582
 583#else
 584
 585/* EVA specific functions */
 586
 587extern size_t __copy_from_user_eva(void *__to, const void *__from,
 588                                   size_t __n);
 589extern size_t __copy_to_user_eva(void *__to, const void *__from,
 590                                 size_t __n);
 591extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 592
 593/*
 594 * Source or destination address is in userland. We need to go through
 595 * the TLB
 596 */
 597#define __invoke_copy_from_user(to, from, n)                            \
 598        __invoke_copy_from(__copy_from_user_eva, to, from, n)
 599
 600#define __invoke_copy_to_user(to, from, n)                              \
 601        __invoke_copy_to(__copy_to_user_eva, to, from, n)
 602
 603#define ___invoke_copy_in_user(to, from, n)                             \
 604        __invoke_copy_from(__copy_in_user_eva, to, from, n)
 605
 606#endif /* CONFIG_EVA */
 607
 608static inline unsigned long
 609raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 610{
 611        if (eva_kernel_access())
 612                return __invoke_copy_to_kernel(to, from, n);
 613        else
 614                return __invoke_copy_to_user(to, from, n);
 615}
 616
 617static inline unsigned long
 618raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 619{
 620        if (eva_kernel_access())
 621                return __invoke_copy_from_kernel(to, from, n);
 622        else
 623                return __invoke_copy_from_user(to, from, n);
 624}
 625
 626#define INLINE_COPY_FROM_USER
 627#define INLINE_COPY_TO_USER
 628
 629static inline unsigned long
 630raw_copy_in_user(void __user*to, const void __user *from, unsigned long n)
 631{
 632        if (eva_kernel_access())
 633                return ___invoke_copy_in_kernel(to, from, n);
 634        else
 635                return ___invoke_copy_in_user(to, from, n);
 636}
 637
 638extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
 639extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
 640
 641/*
 642 * __clear_user: - Zero a block of memory in user space, with less checking.
 643 * @to:   Destination address, in user space.
 644 * @n:    Number of bytes to zero.
 645 *
 646 * Zero a block of memory in user space.  Caller must check
 647 * the specified block with access_ok() before calling this function.
 648 *
 649 * Returns number of bytes that could not be cleared.
 650 * On success, this will be zero.
 651 */
 652static inline __kernel_size_t
 653__clear_user(void __user *addr, __kernel_size_t size)
 654{
 655        __kernel_size_t res;
 656
 657        if (eva_kernel_access()) {
 658                __asm__ __volatile__(
 659                        "move\t$4, %1\n\t"
 660                        "move\t$5, $0\n\t"
 661                        "move\t$6, %2\n\t"
 662                        __MODULE_JAL(__bzero_kernel)
 663                        "move\t%0, $6"
 664                        : "=r" (res)
 665                        : "r" (addr), "r" (size)
 666                        : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
 667        } else {
 668                might_fault();
 669                __asm__ __volatile__(
 670                        "move\t$4, %1\n\t"
 671                        "move\t$5, $0\n\t"
 672                        "move\t$6, %2\n\t"
 673                        __MODULE_JAL(__bzero)
 674                        "move\t%0, $6"
 675                        : "=r" (res)
 676                        : "r" (addr), "r" (size)
 677                        : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
 678        }
 679
 680        return res;
 681}
 682
 683#define clear_user(addr,n)                                              \
 684({                                                                      \
 685        void __user * __cl_addr = (addr);                               \
 686        unsigned long __cl_size = (n);                                  \
 687        if (__cl_size && access_ok(VERIFY_WRITE,                        \
 688                                        __cl_addr, __cl_size))          \
 689                __cl_size = __clear_user(__cl_addr, __cl_size);         \
 690        __cl_size;                                                      \
 691})
 692
 693extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
 694extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
 695
 696/*
 697 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 698 * @dst:   Destination address, in kernel space.  This buffer must be at
 699 *         least @count bytes long.
 700 * @src:   Source address, in user space.
 701 * @count: Maximum number of bytes to copy, including the trailing NUL.
 702 *
 703 * Copies a NUL-terminated string from userspace to kernel space.
 704 *
 705 * On success, returns the length of the string (not including the trailing
 706 * NUL).
 707 *
 708 * If access to userspace fails, returns -EFAULT (some data may have been
 709 * copied).
 710 *
 711 * If @count is smaller than the length of the string, copies @count bytes
 712 * and returns @count.
 713 */
 714static inline long
 715strncpy_from_user(char *__to, const char __user *__from, long __len)
 716{
 717        long res;
 718
 719        if (eva_kernel_access()) {
 720                __asm__ __volatile__(
 721                        "move\t$4, %1\n\t"
 722                        "move\t$5, %2\n\t"
 723                        "move\t$6, %3\n\t"
 724                        __MODULE_JAL(__strncpy_from_kernel_asm)
 725                        "move\t%0, $2"
 726                        : "=r" (res)
 727                        : "r" (__to), "r" (__from), "r" (__len)
 728                        : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
 729        } else {
 730                might_fault();
 731                __asm__ __volatile__(
 732                        "move\t$4, %1\n\t"
 733                        "move\t$5, %2\n\t"
 734                        "move\t$6, %3\n\t"
 735                        __MODULE_JAL(__strncpy_from_user_asm)
 736                        "move\t%0, $2"
 737                        : "=r" (res)
 738                        : "r" (__to), "r" (__from), "r" (__len)
 739                        : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
 740        }
 741
 742        return res;
 743}
 744
 745extern long __strnlen_kernel_asm(const char __user *s, long n);
 746extern long __strnlen_user_asm(const char __user *s, long n);
 747
 748/*
 749 * strnlen_user: - Get the size of a string in user space.
 750 * @str: The string to measure.
 751 *
 752 * Context: User context only. This function may sleep if pagefaults are
 753 *          enabled.
 754 *
 755 * Get the size of a NUL-terminated string in user space.
 756 *
 757 * Returns the size of the string INCLUDING the terminating NUL.
 758 * On exception, returns 0.
 759 * If the string is too long, returns a value greater than @n.
 760 */
 761static inline long strnlen_user(const char __user *s, long n)
 762{
 763        long res;
 764
 765        might_fault();
 766        if (eva_kernel_access()) {
 767                __asm__ __volatile__(
 768                        "move\t$4, %1\n\t"
 769                        "move\t$5, %2\n\t"
 770                        __MODULE_JAL(__strnlen_kernel_asm)
 771                        "move\t%0, $2"
 772                        : "=r" (res)
 773                        : "r" (s), "r" (n)
 774                        : "$2", "$4", "$5", __UA_t0, "$31");
 775        } else {
 776                __asm__ __volatile__(
 777                        "move\t$4, %1\n\t"
 778                        "move\t$5, %2\n\t"
 779                        __MODULE_JAL(__strnlen_user_asm)
 780                        "move\t%0, $2"
 781                        : "=r" (res)
 782                        : "r" (s), "r" (n)
 783                        : "$2", "$4", "$5", __UA_t0, "$31");
 784        }
 785
 786        return res;
 787}
 788
 789#endif /* _ASM_UACCESS_H */
 790