linux/arch/m32r/include/asm/uaccess.h
#ifndef _ASM_M32R_UACCESS_H
#define _ASM_M32R_UACCESS_H

/*
 *  linux/include/asm-m32r/uaccess.h
 *
 *  M32R version.
 *    Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
 */

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm/page.h>
#include <asm/setup.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#ifdef CONFIG_MMU

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)
#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#else /* not CONFIG_MMU */

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS         MAKE_MM_SEG(0xFFFFFFFF)
#define get_ds()        (KERNEL_DS)

static inline mm_segment_t get_fs(void)
{
        return USER_DS;
}

static inline void set_fs(mm_segment_t s)
{
}

#endif /* not CONFIG_MMU */
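
/*
 * Usage sketch (do_transfer, kernel_buf, len and err are hypothetical):
 * the classic way for kernel code to pass kernel pointers through the
 * user-access routines is to widen the address limit around the call
 * and always restore it afterwards:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = do_transfer(kernel_buf, len);
 *	set_fs(old_fs);
 *
 * set_fs(KERNEL_DS) bypasses the addr_limit check while do_transfer()
 * (a stand-in for any routine that reaches the helpers below) runs,
 * and the old limit is restored on every path.
 */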

#define segment_eq(a,b) ((a).seg == (b).seg)

#define __addr_ok(addr) \
        ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg
 *
 * This needs 33-bit arithmetic. We have a carry...
 */
#define __range_ok(addr,size) ({                                        \
        unsigned long flag, roksum;                                     \
        __chk_user_ptr(addr);                                           \
        asm (                                                           \
                "       cmpu    %1, %1    ; clear cbit\n"               \
                "       addx    %1, %3    ; set cbit if overflow\n"     \
                "       subx    %0, %0\n"                               \
                "       cmpu    %4, %1\n"                               \
                "       subx    %0, %5\n"                               \
                : "=&r" (flag), "=r" (roksum)                           \
                : "1" (addr), "r" ((int)(size)),                        \
                  "r" (current_thread_info()->addr_limit.seg), "r" (0)  \
                : "cbit" );                                             \
        flag; })
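
/*
 * In plain C, the flag computed above is roughly (hypothetical helper,
 * shown only to make the carry trick explicit):
 *
 *	static inline long range_ok_sketch(unsigned long addr,
 *					   unsigned long size,
 *					   unsigned long limit)
 *	{
 *		unsigned long long sum = (unsigned long long)addr + size;
 *
 *		return (sum > (unsigned long long)limit) ? -1 : 0;
 *	}
 *
 * i.e. a 33-bit comparison of addr + size against addr_limit.seg,
 * yielding zero exactly when the whole range fits below the limit.
 */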

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#ifdef CONFIG_MMU
#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0))
#else
static inline int access_ok(int type, const void *addr, unsigned long size)
{
        unsigned long val = (unsigned long)addr;

        return ((val >= memory_start) && ((val + size) < memory_end));
}
#endif /* CONFIG_MMU */
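
/*
 * Usage sketch (ubuf and len are hypothetical): access_ok() returns
 * nonzero when the block may be accessed, so callers typically bail
 * out with -EFAULT on failure:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */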

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
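
/*
 * Conceptual sketch (helper names are stand-ins): on a fault at a
 * kernel address, the trap handler searches this table for the
 * faulting instruction and, if an entry exists, resumes at its fixup:
 *
 *	const struct exception_table_entry *e;
 *
 *	e = search_exception_table(faulting_pc);
 *	if (e) {
 *		set_resume_pc(e->fixup);
 *		return 1;
 *	}
 *	return 0;
 *
 * search_exception_table() and set_resume_pc() abstract the real
 * sorted-table search and register update done by fixup_exception().
 */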

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

/* Careful: we have to cast the result to the type of the pointer for sign
   reasons */
/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr)                                                 \
        __get_user_check((x),(ptr),sizeof(*(ptr)))
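
/*
 * Usage sketch (arg is a hypothetical user-supplied address):
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 *
 * On failure val has been zeroed and -EFAULT propagates to the caller.
 */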

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr)                                                 \
        __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
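
/*
 * Usage sketch (status and arg are hypothetical): writing one result
 * value back to user space; the address check happens inside put_user():
 *
 *	if (put_user(status, (int __user *)arg))
 *		return -EFAULT;
 */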

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
        __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
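
/*
 * Usage sketch (p is a hypothetical pointer to a two-field user
 * structure): one access_ok() call covers several unchecked accesses
 * to the same block:
 *
 *	int a, b;
 *
 *	if (!access_ok(VERIFY_READ, p, sizeof(*p)))
 *		return -EFAULT;
 *	if (__get_user(a, &p->a) || __get_user(b, &p->b))
 *		return -EFAULT;
 */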

#define __get_user_nocheck(x,ptr,size)                                  \
({                                                                      \
        long __gu_err = 0;                                              \
        unsigned long __gu_val;                                         \
        might_sleep();                                                  \
        __get_user_size(__gu_val,(ptr),(size),__gu_err);                \
        (x) = (__typeof__(*(ptr)))__gu_val;                             \
        __gu_err;                                                       \
})

#define __get_user_check(x,ptr,size)                                    \
({                                                                      \
        long __gu_err = -EFAULT;                                        \
        unsigned long __gu_val = 0;                                     \
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr);             \
        might_sleep();                                                  \
        if (access_ok(VERIFY_READ,__gu_addr,size))                      \
                __get_user_size(__gu_val,__gu_addr,(size),__gu_err);    \
        (x) = (__typeof__(*(ptr)))__gu_val;                             \
        __gu_err;                                                       \
})

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)                              \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
          case 1: __get_user_asm(x,ptr,retval,"ub"); break;             \
          case 2: __get_user_asm(x,ptr,retval,"uh"); break;             \
          case 4: __get_user_asm(x,ptr,retval,""); break;               \
          default: (x) = __get_user_bad();                              \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype)                             \
        __asm__ __volatile__(                                           \
                "       .fillinsn\n"                                    \
                "1:     ld"itype" %1,@%2\n"                             \
                "       .fillinsn\n"                                    \
                "2:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "       .balign 4\n"                                    \
                "3:     ldi %0,%3\n"                                    \
                "       seth r14,#high(2b)\n"                           \
                "       or3 r14,r14,#low(2b)\n"                         \
                "       jmp r14\n"                                      \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
                "       .balign 4\n"                                    \
                "       .long 1b,3b\n"                                  \
                ".previous"                                             \
                : "=&r" (err), "=&r" (x)                                \
                : "r" (addr), "i" (-EFAULT), "0" (err)                  \
                : "r14", "memory")

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
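
/*
 * Usage sketch (up and value are hypothetical, say an int __user *
 * and an int), mirroring the __get_user() pattern above:
 *
 *	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)))
 *		return -EFAULT;
 *	if (__put_user(value, up))
 *		return -EFAULT;
 */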


#define __put_user_nocheck(x,ptr,size)                                  \
({                                                                      \
        long __pu_err;                                                  \
        might_sleep();                                                  \
        __put_user_size((x),(ptr),(size),__pu_err);                     \
        __pu_err;                                                       \
})


#define __put_user_check(x,ptr,size)                                    \
({                                                                      \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        might_sleep();                                                  \
        if (access_ok(VERIFY_WRITE,__pu_addr,size))                     \
                __put_user_size((x),__pu_addr,(size),__pu_err);         \
        __pu_err;                                                       \
})

#if defined(__LITTLE_ENDIAN__)
#define __put_user_u64(x, addr, err)                                    \
        __asm__ __volatile__(                                           \
                "       .fillinsn\n"                                    \
                "1:     st %L1,@%2\n"                                   \
                "       .fillinsn\n"                                    \
                "2:     st %H1,@(4,%2)\n"                               \
                "       .fillinsn\n"                                    \
                "3:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "       .balign 4\n"                                    \
                "4:     ldi %0,%3\n"                                    \
                "       seth r14,#high(3b)\n"                           \
                "       or3 r14,r14,#low(3b)\n"                         \
                "       jmp r14\n"                                      \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
                "       .balign 4\n"                                    \
                "       .long 1b,4b\n"                                  \
                "       .long 2b,4b\n"                                  \
                ".previous"                                             \
                : "=&r" (err)                                           \
                : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)         \
                : "r14", "memory")

#elif defined(__BIG_ENDIAN__)
#define __put_user_u64(x, addr, err)                                    \
        __asm__ __volatile__(                                           \
                "       .fillinsn\n"                                    \
                "1:     st %H1,@%2\n"                                   \
                "       .fillinsn\n"                                    \
                "2:     st %L1,@(4,%2)\n"                               \
                "       .fillinsn\n"                                    \
                "3:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "       .balign 4\n"                                    \
                "4:     ldi %0,%3\n"                                    \
                "       seth r14,#high(3b)\n"                           \
                "       or3 r14,r14,#low(3b)\n"                         \
                "       jmp r14\n"                                      \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
                "       .balign 4\n"                                    \
                "       .long 1b,4b\n"                                  \
                "       .long 2b,4b\n"                                  \
                ".previous"                                             \
                : "=&r" (err)                                           \
                : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)         \
                : "r14", "memory")
#else
#error no endian defined
#endif

extern void __put_user_bad(void);

#define __put_user_size(x,ptr,size,retval)                              \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
          case 1: __put_user_asm(x,ptr,retval,"b"); break;              \
          case 2: __put_user_asm(x,ptr,retval,"h"); break;              \
          case 4: __put_user_asm(x,ptr,retval,""); break;               \
          case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\
          default: __put_user_bad();                                    \
        }                                                               \
} while (0)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype)                             \
        __asm__ __volatile__(                                           \
                "       .fillinsn\n"                                    \
                "1:     st"itype" %1,@%2\n"                             \
                "       .fillinsn\n"                                    \
                "2:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "       .balign 4\n"                                    \
                "3:     ldi %0,%3\n"                                    \
                "       seth r14,#high(2b)\n"                           \
                "       or3 r14,r14,#low(2b)\n"                         \
                "       jmp r14\n"                                      \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
                "       .balign 4\n"                                    \
                "       .long 1b,3b\n"                                  \
                ".previous"                                             \
                : "=&r" (err)                                           \
                : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)         \
                : "r14", "memory")

/*
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the m32r will not write
 * anything, so this is accurate.
 */

/*
 * Copy To/From Userspace
 */

/* Generic arbitrary sized copy.  */
/* Return the number of bytes NOT copied.  */
#define __copy_user(to,from,size)                                       \
do {                                                                    \
        unsigned long __dst, __src, __c;                                \
        __asm__ __volatile__ (                                          \
                "       mv      r14, %0\n"                              \
                "       or      r14, %1\n"                              \
                "       beq     %0, %1, 9f\n"                           \
                "       beqz    %2, 9f\n"                               \
                "       and3    r14, r14, #3\n"                         \
                "       bnez    r14, 2f\n"                              \
                "       and3    %2, %2, #3\n"                           \
                "       beqz    %3, 2f\n"                               \
                "       addi    %0, #-4         ; word_copy \n"         \
                "       .fillinsn\n"                                    \
                "0:     ld      r14, @%1+\n"                            \
                "       addi    %3, #-1\n"                              \
                "       .fillinsn\n"                                    \
                "1:     st      r14, @+%0\n"                            \
                "       bnez    %3, 0b\n"                               \
                "       beqz    %2, 9f\n"                               \
                "       addi    %0, #4\n"                               \
                "       .fillinsn\n"                                    \
                "2:     ldb     r14, @%1        ; byte_copy \n"         \
                "       .fillinsn\n"                                    \
                "3:     stb     r14, @%0\n"                             \
                "       addi    %1, #1\n"                               \
                "       addi    %2, #-1\n"                              \
                "       addi    %0, #1\n"                               \
                "       bnez    %2, 2b\n"                               \
                "       .fillinsn\n"                                    \
                "9:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "       .balign 4\n"                                    \
                "5:     addi    %3, #1\n"                               \
                "       addi    %1, #-4\n"                              \
                "       .fillinsn\n"                                    \
                "6:     slli    %3, #2\n"                               \
                "       add     %2, %3\n"                               \
                "       addi    %0, #4\n"                               \
                "       .fillinsn\n"                                    \
                "7:     seth    r14, #high(9b)\n"                       \
                "       or3     r14, r14, #low(9b)\n"                   \
                "       jmp     r14\n"                                  \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
                "       .balign 4\n"                                    \
                "       .long 0b,6b\n"                                  \
                "       .long 1b,5b\n"                                  \
                "       .long 2b,9b\n"                                  \
                "       .long 3b,9b\n"                                  \
                ".previous\n"                                           \
                : "=&r" (__dst), "=&r" (__src), "=&r" (size),           \
                  "=&r" (__c)                                           \
                : "0" (to), "1" (from), "2" (size), "3" (size / 4)      \
                : "r14", "memory");                                     \
} while (0)

#define __copy_user_zeroing(to,from,size)                               \
do {                                                                    \
        unsigned long __dst, __src, __c;                                \
        __asm__ __volatile__ (                                          \
                "       mv      r14, %0\n"                              \
                "       or      r14, %1\n"                              \
                "       beq     %0, %1, 9f\n"                           \
                "       beqz    %2, 9f\n"                               \
                "       and3    r14, r14, #3\n"                         \
                "       bnez    r14, 2f\n"                              \
                "       and3    %2, %2, #3\n"                           \
                "       beqz    %3, 2f\n"                               \
                "       addi    %0, #-4         ; word_copy \n"         \
                "       .fillinsn\n"                                    \
                "0:     ld      r14, @%1+\n"                            \
                "       addi    %3, #-1\n"                              \
                "       .fillinsn\n"                                    \
                "1:     st      r14, @+%0\n"                            \
                "       bnez    %3, 0b\n"                               \
                "       beqz    %2, 9f\n"                               \
                "       addi    %0, #4\n"                               \
                "       .fillinsn\n"                                    \
                "2:     ldb     r14, @%1        ; byte_copy \n"         \
                "       .fillinsn\n"                                    \
                "3:     stb     r14, @%0\n"                             \
                "       addi    %1, #1\n"                               \
                "       addi    %2, #-1\n"                              \
                "       addi    %0, #1\n"                               \
                "       bnez    %2, 2b\n"                               \
                "       .fillinsn\n"                                    \
                "9:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "       .balign 4\n"                                    \
                "5:     addi    %3, #1\n"                               \
                "       addi    %1, #-4\n"                              \
                "       .fillinsn\n"                                    \
                "6:     slli    %3, #2\n"                               \
                "       add     %2, %3\n"                               \
                "       addi    %0, #4\n"                               \
                "       .fillinsn\n"                                    \
                "7:     ldi     r14, #0         ; store zero \n"        \
                "       .fillinsn\n"                                    \
                "8:     addi    %2, #-1\n"                              \
                "       stb     r14, @%0        ; ACE? \n"              \
                "       addi    %0, #1\n"                               \
                "       bnez    %2, 8b\n"                               \
                "       seth    r14, #high(9b)\n"                       \
                "       or3     r14, r14, #low(9b)\n"                   \
                "       jmp     r14\n"                                  \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
                "       .balign 4\n"                                    \
                "       .long 0b,6b\n"                                  \
                "       .long 1b,5b\n"                                  \
                "       .long 2b,7b\n"                                  \
                "       .long 3b,7b\n"                                  \
                ".previous\n"                                           \
                : "=&r" (__dst), "=&r" (__src), "=&r" (size),           \
                  "=&r" (__c)                                           \
                : "0" (to), "1" (from), "2" (size), "3" (size / 4)      \
                : "r14", "memory");                                     \
} while (0)


/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */
static inline unsigned long __generic_copy_from_user_nocheck(void *to,
        const void __user *from, unsigned long n)
{
        __copy_user_zeroing(to,from,n);
        return n;
}

static inline unsigned long __generic_copy_to_user_nocheck(void __user *to,
        const void *from, unsigned long n)
{
        __copy_user(to,from,n);
        return n;
}

unsigned long __generic_copy_to_user(void __user *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long);

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to,from,n)                       \
        __generic_copy_to_user_nocheck((to),(from),(n))

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to,from,n)                         \
({                                                      \
        might_sleep();                                  \
        __generic_copy_to_user((to),(from),(n));        \
})
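
/*
 * Usage sketch (ubuf, kbuf and len are hypothetical): the "bytes not
 * copied" result is usually folded into -EFAULT:
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 *	return len;
 */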

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to,from,n)                     \
        __generic_copy_from_user_nocheck((to),(from),(n))

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to,from,n)                       \
({                                                      \
        might_sleep();                                  \
        __generic_copy_from_user((to),(from),(n));      \
})
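
/*
 * Usage sketch (uargs is a hypothetical user pointer): pulling in a
 * fixed-size argument block; on a partial fault the uncopied tail has
 * already been zero-filled by the zeroing copy routine above:
 *
 *	struct hypothetical_args args;
 *
 *	if (copy_from_user(&args, uargs, sizeof(args)))
 *		return -EFAULT;
 */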

long __must_check strncpy_from_user(char *dst, const char __user *src,
                                long count);
long __must_check __strncpy_from_user(char *dst,
                                const char __user *src, long count);
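
/*
 * Usage sketch (uname is a hypothetical user pointer): copying a
 * NUL-terminated user string into a bounded kernel buffer:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 *
 * A negative return means a fault; a return equal to the buffer size
 * means the string may be truncated (the overflow policy shown is just
 * an example).
 */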

/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long __clear_user(void __user *mem, unsigned long len);

/**
 * clear_user: - Zero a block of memory in user space.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  The block is checked with
 * access_ok() internally, so no prior check by the caller is needed.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long clear_user(void __user *mem, unsigned long len);
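
/*
 * Usage sketch (ubuf, copied and len are hypothetical): zeroing the
 * unused tail of a user buffer after a short copy:
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */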

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
long strnlen_user(const char __user *str, long n);
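
/*
 * Usage sketch (ustr and MAX_LEN are hypothetical): bounding a user
 * string before allocating space for it:
 *
 *	long len = strnlen_user(ustr, MAX_LEN);
 *
 *	if (len == 0)
 *		return -EFAULT;
 *	if (len > MAX_LEN)
 *		return -EINVAL;
 *
 * The returned count includes the terminating NUL; zero signals a
 * fault while scanning, and a value above the limit means no NUL was
 * found within it.
 */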

#endif /* _ASM_M32R_UACCESS_H */