linux/arch/mips/include/asm/uaccess.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR       ".word"
#define __UA_LA         "la"
#define __UA_ADDU       "addu"
#define __UA_t0         "$8"
#define __UA_t1         "$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT      __ua_limit

#define __UA_ADDR       ".dword"
#define __UA_LA         "dla"
#define __UA_ADDU       "daddu"
#define __UA_t0         "$12"
#define __UA_t1         "$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS       ((mm_segment_t) { 0x80000000UL })
#define USER_DS         ((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS       ((mm_segment_t) { 0UL })
#define USER_DS         ((mm_segment_t) { __UA_LIMIT })
#endif

#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)

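/*
 * Illustrative sketch (not part of this header): the generic uaccess code
 * compares segments with segment_eq(); uaccess_kernel() is typically
 * segment_eq(get_fs(), KERNEL_DS), so it becomes true after set_fs(KERNEL_DS)
 * and false again once the caller restores the saved limit:
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      // ... access kernel pointers through the user accessors ...
 *      set_fs(old_fs);
 */
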
/*
 * eva_kernel_access() - determine whether this is a kernel memory access on
 * an EVA system
 *
 * Determines whether memory accesses should be performed to kernel memory
 * on a system using Extended Virtual Addressing (EVA).
 *
 * Return: true for a kernel memory access on an EVA system, else false.
 */
static inline bool eva_kernel_access(void)
{
        if (!IS_ENABLED(CONFIG_EVA))
                return false;

        return uaccess_kernel();
}

/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * An address is valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)                                                 \
        ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

static inline int __access_ok(const void __user *p, unsigned long size)
{
        unsigned long addr = (unsigned long)p;
        return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
}

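/*
 * Worked example (illustrative; assumes the 32-bit non-KVM values above):
 * with USER_DS.seg == 0x80000000, a request with addr == 0x7fffff00 and
 * size == 0x200 wraps past the limit: addr + size == 0x80000100 has the
 * high bit set, so the OR of the three terms ANDed with the segment mask
 * is non-zero and __access_ok() correctly returns 0.
 */
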
#define access_ok(addr, size)                                   \
        likely(__access_ok((addr), (size)))

/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
        __put_user_check((x), (ptr), sizeof(*(ptr)))

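/*
 * Usage sketch (illustrative only; the function and variable names below are
 * hypothetical, not part of this header):
 *
 *      static int example_report_status(int __user *uptr, int status)
 *      {
 *              // returns 0 on success, -EFAULT if uptr is not writable
 *              return put_user(status, uptr);
 *      }
 */
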
/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))

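/*
 * Usage sketch (illustrative only; hypothetical names):
 *
 *      static int example_read_request(unsigned int __user *uptr,
 *                                      unsigned int *req)
 *      {
 *              // on failure the destination is zeroed and -EFAULT returned
 *              if (get_user(*req, uptr))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */
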
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
        __put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

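/*
 * Usage sketch for the unchecked variants (illustrative only; hypothetical
 * names): the caller performs a single access_ok() check and may then use
 * __get_user()/__put_user() on addresses inside the validated block.
 *
 *      static int example_copy_word(unsigned int __user *uaddr)
 *      {
 *              unsigned int val;
 *
 *              if (!access_ok(uaddr, 2 * sizeof(*uaddr)))
 *                      return -EFAULT;
 *              if (__get_user(val, uaddr))
 *                      return -EFAULT;
 *              return __put_user(val, uaddr + 1);
 *      }
 */
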
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel specific functions for EVA. We need to use normal load instructions
 * to read data from kernel when operating in EVA mode. We use these macros to
 * avoid redefining __get_data_asm for EVA.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd                  _loadw
#else
#define _loadd(reg, addr)       "ld " reg ", " addr
#endif
#define _loadw(reg, addr)       "lw " reg ", " addr
#define _loadh(reg, addr)       "lh " reg ", " addr
#define _loadb(reg, addr)       "lb " reg ", " addr

#define __get_kernel_common(val, size, ptr)                             \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_data_asm(val, _loadb, ptr); break;                \
        case 2: __get_data_asm(val, _loadh, ptr); break;                \
        case 4: __get_data_asm(val, _loadw, ptr); break;                \
        case 8: __GET_DW(val, _loadd, ptr); break;                      \
        default: __get_user_unknown(); break;                           \
        }                                                               \
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)                               \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_data_asm(val, user_lb, ptr); break;               \
        case 2: __get_data_asm(val, user_lh, ptr); break;               \
        case 4: __get_data_asm(val, user_lw, ptr); break;               \
        case 8: __GET_DW(val, user_ld, ptr); break;                     \
        default: __get_user_unknown(); break;                           \
        }                                                               \
} while (0)

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
                                                                        \
        if (eva_kernel_access()) {                                      \
                __get_kernel_common((x), size, ptr);                    \
        } else {                                                        \
                __chk_user_ptr(ptr);                                    \
                __get_user_common((x), size, ptr);                      \
        }                                                               \
        __gu_err;                                                       \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        int __gu_err = -EFAULT;                                         \
        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(__gu_ptr, size))) {                        \
                if (eva_kernel_access())                                \
                        __get_kernel_common((x), size, __gu_ptr);       \
                else                                                    \
                        __get_user_common((x), size, __gu_ptr);         \
        } else                                                          \
                (x) = 0;                                                \
                                                                        \
        __gu_err;                                                       \
})

#define __get_data_asm(val, insn, addr)                                 \
{                                                                       \
        long __gu_tmp;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     "insn("%1", "%3")"                              \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
        "       "__UA_ADDR "\t1b, 3b                            \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=r" (__gu_tmp)                              \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
                                                                        \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)                            \
{                                                                       \
        union {                                                         \
                unsigned long long      l;                              \
                __typeof__(*(addr))     t;                              \
        } __gu_tmp;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn("%1", "(%3)")"                           \n"     \
        "2:     " insn("%D1", "4(%3)")"                         \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=&r" (__gu_tmp.l)                           \
        : "0" (0), "r" (addr), "i" (-EFAULT));                          \
                                                                        \
        (val) = __gu_tmp.t;                                             \
}

#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel specific functions for EVA. We need to use normal store instructions
 * to write data to kernel when operating in EVA mode. We use these macros to
 * avoid redefining __put_data_asm for EVA.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored                 _storew
#else
#define _stored(reg, addr)      "sd " reg ", " addr
#endif

#define _storew(reg, addr)      "sw " reg ", " addr
#define _storeh(reg, addr)      "sh " reg ", " addr
#define _storeb(reg, addr)      "sb " reg ", " addr

#define __put_kernel_common(ptr, size)                                  \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __put_data_asm(_storeb, ptr); break;                    \
        case 2: __put_data_asm(_storeh, ptr); break;                    \
        case 4: __put_data_asm(_storew, ptr); break;                    \
        case 8: __PUT_DW(_stored, ptr); break;                          \
        default: __put_user_unknown(); break;                           \
        }                                                               \
} while (0)
#endif

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size)                                    \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __put_data_asm(user_sb, ptr); break;                    \
        case 2: __put_data_asm(user_sh, ptr); break;                    \
        case 4: __put_data_asm(user_sw, ptr); break;                    \
        case 8: __PUT_DW(user_sd, ptr); break;                          \
        default: __put_user_unknown(); break;                           \
        }                                                               \
} while (0)

#define __put_user_nocheck(x, ptr, size)                                \
({                                                                      \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
        __pu_val = (x);                                                 \
        if (eva_kernel_access()) {                                      \
                __put_kernel_common(ptr, size);                         \
        } else {                                                        \
                __chk_user_ptr(ptr);                                    \
                __put_user_common(ptr, size);                           \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        int __pu_err = -EFAULT;                                         \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(__pu_addr, size))) {                       \
                if (eva_kernel_access())                                \
                        __put_kernel_common(__pu_addr, size);           \
                else                                                    \
                        __put_user_common(__pu_addr, size);             \
        }                                                               \
                                                                        \
        __pu_err;                                                       \
})

#define __put_data_asm(insn, ptr)                                       \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     "insn("%z2", "%3")"     # __put_data_asm        \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 3b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__pu_err)                                               \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
          "i" (-EFAULT));                                               \
}

#define __put_data_asm_ll32(insn, ptr)                                  \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     "insn("%2", "(%3)")"    # __put_data_asm_ll32   \n"     \
        "2:     "insn("%D2", "4(%3)")"                          \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous"                                              \
        : "=r" (__pu_err)                                               \
        : "0" (0), "r" (__pu_val), "r" (ptr),                           \
          "i" (-EFAULT));                                               \
}

extern void __put_user_unknown(void);

/*
 * We're generating jumps to subroutines which will be outside the range of
 * jump instructions
 */
#ifdef MODULE
#define __MODULE_JAL(destination)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_LA "\t$1, " #destination "\n\t"                            \
        "jalr\t$1\n\t"                                                  \
        ".set\tat\n\t"
#else
#define __MODULE_JAL(destination)                                       \
        "jal\t" #destination "\n\t"
#endif

#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&    \
                                              defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from(func, to, from, n)                           \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(func)                                              \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_to(func, to, from, n)                             \
({                                                                      \
        register void __user *__cu_to_r __asm__("$4");                  \
        register const void *__cu_from_r __asm__("$5");                 \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        __MODULE_JAL(func)                                              \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_from_kernel(to, from, n)                          \
        __invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_to_kernel(to, from, n)                            \
        __invoke_copy_to(__copy_user, to, from, n)

#define ___invoke_copy_in_kernel(to, from, n)                           \
        __invoke_copy_from(__copy_user, to, from, n)

#ifndef CONFIG_EVA
#define __invoke_copy_from_user(to, from, n)                            \
        __invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_to_user(to, from, n)                              \
        __invoke_copy_to(__copy_user, to, from, n)

#define ___invoke_copy_in_user(to, from, n)                             \
        __invoke_copy_from(__copy_user, to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_from_user_eva(void *__to, const void *__from,
                                   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
                                 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

/*
 * Source or destination address is in userland. We need to go through
 * the TLB
 */
#define __invoke_copy_from_user(to, from, n)                            \
        __invoke_copy_from(__copy_from_user_eva, to, from, n)

#define __invoke_copy_to_user(to, from, n)                              \
        __invoke_copy_to(__copy_to_user_eva, to, from, n)

#define ___invoke_copy_in_user(to, from, n)                             \
        __invoke_copy_from(__copy_in_user_eva, to, from, n)

#endif /* CONFIG_EVA */

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (eva_kernel_access())
                return __invoke_copy_to_kernel(to, from, n);
        else
                return __invoke_copy_to_user(to, from, n);
}

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (eva_kernel_access())
                return __invoke_copy_from_kernel(to, from, n);
        else
                return __invoke_copy_from_user(to, from, n);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        if (eva_kernel_access())
                return ___invoke_copy_in_kernel(to, from, n);
        else
                return ___invoke_copy_in_user(to, from, n);
}

extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
        __kernel_size_t res;

#ifdef CONFIG_CPU_MICROMIPS
/* micromips memset / bzero also clobbers t7 & t8 */
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
#else
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */

        if (eva_kernel_access()) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, $0\n\t"
                        "move\t$6, %2\n\t"
                        __MODULE_JAL(__bzero_kernel)
                        "move\t%0, $6"
                        : "=r" (res)
                        : "r" (addr), "r" (size)
                        : bzero_clobbers);
        } else {
                might_fault();
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, $0\n\t"
                        "move\t$6, %2\n\t"
                        __MODULE_JAL(__bzero)
                        "move\t%0, $6"
                        : "=r" (res)
                        : "r" (addr), "r" (size)
                        : bzero_clobbers);
        }

        return res;
}

#define clear_user(addr,n)                                              \
({                                                                      \
        void __user * __cl_addr = (addr);                               \
        unsigned long __cl_size = (n);                                  \
        if (__cl_size && access_ok(__cl_addr, __cl_size))               \
                __cl_size = __clear_user(__cl_addr, __cl_size);         \
        __cl_size;                                                      \
})
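
/*
 * Usage sketch (illustrative only; hypothetical names): clear_user() does the
 * access_ok() check itself, so a caller only has to look at how many bytes
 * were left uncleared.
 *
 *      static int example_zero_buffer(void __user *ubuf, unsigned long len)
 *      {
 *              if (clear_user(ubuf, len))
 *                      return -EFAULT; // some bytes could not be cleared
 *              return 0;
 *      }
 */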

extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
        long res;

        if (eva_kernel_access()) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        "move\t$6, %3\n\t"
                        __MODULE_JAL(__strncpy_from_kernel_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (__to), "r" (__from), "r" (__len)
                        : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
        } else {
                might_fault();
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        "move\t$6, %3\n\t"
                        __MODULE_JAL(__strncpy_from_user_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (__to), "r" (__from), "r" (__len)
                        : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
        }

        return res;
}

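/*
 * Usage sketch (illustrative only; hypothetical names): copy a user-supplied
 * name into a fixed kernel buffer and guarantee NUL termination even when the
 * string was truncated to the buffer size.
 *
 *      static int example_get_name(char *kbuf, long kbuf_len,
 *                                  const char __user *uname)
 *      {
 *              long len = strncpy_from_user(kbuf, uname, kbuf_len);
 *
 *              if (len < 0)
 *                      return len;             // -EFAULT
 *              if (len == kbuf_len)
 *                      kbuf[kbuf_len - 1] = '\0';      // truncated
 *              return 0;
 *      }
 */
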
extern long __strnlen_kernel_asm(const char __user *s, long n);
extern long __strnlen_user_asm(const char __user *s, long n);

/*
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum number of bytes to examine.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
        long res;

        might_fault();
        if (eva_kernel_access()) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        __MODULE_JAL(__strnlen_kernel_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (s), "r" (n)
                        : "$2", "$4", "$5", __UA_t0, "$31");
        } else {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        __MODULE_JAL(__strnlen_user_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (s), "r" (n)
                        : "$2", "$4", "$5", __UA_t0, "$31");
        }

        return res;
}

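/*
 * Usage sketch (illustrative only; hypothetical names): bound a user string
 * before copying it, remembering that strnlen_user() counts the terminating
 * NUL and returns 0 on a faulting access.
 *
 *      static long example_user_string_len(const char __user *ustr, long max)
 *      {
 *              long len = strnlen_user(ustr, max);
 *
 *              if (len == 0)
 *                      return -EFAULT;         // fault while reading
 *              if (len > max)
 *                      return -ENAMETOOLONG;   // no NUL within max bytes
 *              return len - 1;                 // length without the NUL
 *      }
 */
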
#endif /* _ASM_UACCESS_H */