linux/arch/m32r/include/asm/uaccess.h
#ifndef _ASM_M32R_UACCESS_H
#define _ASM_M32R_UACCESS_H

/*
 *  linux/include/asm-m32r/uaccess.h
 *
 *  M32R version.
 *    Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
 */

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm/page.h>
#include <asm/setup.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#ifdef CONFIG_MMU

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)
#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#else /* not CONFIG_MMU */

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS         MAKE_MM_SEG(0xFFFFFFFF)
#define get_ds()        (KERNEL_DS)

static inline mm_segment_t get_fs(void)
{
        return USER_DS;
}

static inline void set_fs(mm_segment_t s)
{
}

#endif /* not CONFIG_MMU */
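
/*
 * Example: the classic pattern for temporarily widening the address limit
 * so a user-pointer API can operate on a kernel buffer.  A hedged sketch
 * only; "err", "kbuf", "uptr" and "len" are hypothetical:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);                       // bypass user-range checks
 *	err = copy_from_user(kbuf, uptr, len);   // uptr may now be a kernel address
 *	set_fs(old_fs);                          // always restore the old limit
 */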

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define __addr_ok(addr) \
        ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg
 *
 * This needs 33-bit arithmetic. We have a carry...
 */
#define __range_ok(addr, size) ({                                       \
        unsigned long flag, roksum;                                     \
        __chk_user_ptr(addr);                                           \
        asm (                                                           \
                "       cmpu    %1, %1    ; clear cbit\n"               \
                "       addx    %1, %3    ; set cbit if overflow\n"     \
                "       subx    %0, %0\n"                               \
                "       cmpu    %4, %1\n"                               \
                "       subx    %0, %5\n"                               \
                : "=&r" (flag), "=r" (roksum)                           \
                : "1" (addr), "r" ((int)(size)),                        \
                  "r" (current_thread_info()->addr_limit.seg), "r" (0)  \
                : "cbit" );                                             \
        flag; })
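
/*
 * For reference, a portable C sketch of the same 33-bit test (an
 * illustration only, not used by this header): do the addition in 64-bit
 * arithmetic so the carry out of bit 31 survives, then compare against
 * the current address limit.  Nonzero means the range is invalid, which
 * matches the asm above.
 *
 *	static inline int range_ok_c(unsigned long addr, unsigned long size)
 *	{
 *		unsigned long long sum = (unsigned long long)addr + size;
 *
 *		return sum > current_thread_info()->addr_limit.seg;
 *	}
 */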

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#ifdef CONFIG_MMU
#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
#else
static inline int access_ok(int type, const void *addr, unsigned long size)
{
        unsigned long val = (unsigned long)addr;

        return ((val >= memory_start) && ((val + size) < memory_end));
}
#endif /* CONFIG_MMU */
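
/*
 * Example usage (a sketch; "ubuf" and "len" are hypothetical): validate a
 * user range once up front, then use the unchecked __get_user()/__put_user()
 * helpers below for the individual accesses.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */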

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
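
/*
 * Sketch of how the fault handler consumes these entries (the real logic
 * lives in the arch fault-handling code, not in this header): when a fault
 * hits an address recorded in the "insn" field, execution resumes at the
 * matching "fixup" address instead of oopsing.
 *
 *	if (fixup_exception(regs))
 *		return;         // resumed at the fixup stub
 */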

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

/* Careful: we have to cast the result to the type of the pointer for sign
   reasons */
/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)                                                        \
        __get_user_check((x), (ptr), sizeof(*(ptr)))
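
/*
 * Example (a sketch; "val" and "uptr" are hypothetical): fetch one int
 * from user space, address check included.
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */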

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)                                                        \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
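
/*
 * Example (a sketch; "uptr" is hypothetical): store one value to user
 * space, address check included.
 *
 *	if (put_user(42, uptr))
 *		return -EFAULT;
 */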

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
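
/*
 * Example (a sketch; "uarr", "tmp", "n" and "i" are hypothetical): after a
 * single access_ok() check, read several elements with the cheaper
 * __get_user().
 *
 *	if (!access_ok(VERIFY_READ, uarr, n * sizeof(*uarr)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__get_user(tmp[i], uarr + i))
 *			return -EFAULT;
 */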

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        long __gu_err = 0;                                              \
        unsigned long __gu_val = 0;                                     \
        might_fault();                                                  \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);             \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        long __gu_err = -EFAULT;                                        \
        unsigned long __gu_val = 0;                                     \
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr);             \
        might_fault();                                                  \
        if (access_ok(VERIFY_READ, __gu_addr, size))                    \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
})

extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
          case 1: __get_user_asm(x, ptr, retval, "ub"); break;          \
          case 2: __get_user_asm(x, ptr, retval, "uh"); break;          \
          case 4: __get_user_asm(x, ptr, retval, ""); break;            \
          default: (x) = __get_user_bad();                              \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype)                             \
        __asm__ __volatile__(                                           \
                "       .fillinsn\n"                                    \
                "1:     ld"itype" %1,@%2\n"                             \
                "       .fillinsn\n"                                    \
                "2:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "       .balign 4\n"                                    \
                "3:     ldi %0,%3\n"                                    \
                "       seth r14,#high(2b)\n"                           \
                "       or3 r14,r14,#low(2b)\n"                         \
                "       jmp r14\n"                                      \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
                "       .balign 4\n"                                    \
                "       .long 1b,3b\n"                                  \
                ".previous"                                             \
                : "=&r" (err), "=&r" (x)                                \
                : "r" (addr), "i" (-EFAULT), "0" (err)                  \
                : "r14", "memory")
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))


#define __put_user_nocheck(x, ptr, size)                                \
({                                                                      \
        long __pu_err;                                                  \
        might_fault();                                                  \
        __put_user_size((x), (ptr), (size), __pu_err);                  \
        __pu_err;                                                       \
})


#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        might_fault();                                                  \
        if (access_ok(VERIFY_WRITE, __pu_addr, size))                   \
                __put_user_size((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                                       \
})
#if defined(__LITTLE_ENDIAN__)
#define __put_user_u64(x, addr, err)                                    \
        __asm__ __volatile__(                                           \
                "       .fillinsn\n"                                    \
                "1:     st %L1,@%2\n"                                   \
                "       .fillinsn\n"                                    \
                "2:     st %H1,@(4,%2)\n"                               \
                "       .fillinsn\n"                                    \
                "3:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "       .balign 4\n"                                    \
                "4:     ldi %0,%3\n"                                    \
                "       seth r14,#high(3b)\n"                           \
                "       or3 r14,r14,#low(3b)\n"                         \
                "       jmp r14\n"                                      \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
                "       .balign 4\n"                                    \
                "       .long 1b,4b\n"                                  \
                "       .long 2b,4b\n"                                  \
                ".previous"                                             \
                : "=&r" (err)                                           \
                : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)         \
                : "r14", "memory")

#elif defined(__BIG_ENDIAN__)
#define __put_user_u64(x, addr, err)                                    \
        __asm__ __volatile__(                                           \
                "       .fillinsn\n"                                    \
                "1:     st %H1,@%2\n"                                   \
                "       .fillinsn\n"                                    \
                "2:     st %L1,@(4,%2)\n"                               \
                "       .fillinsn\n"                                    \
                "3:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "       .balign 4\n"                                    \
                "4:     ldi %0,%3\n"                                    \
                "       seth r14,#high(3b)\n"                           \
                "       or3 r14,r14,#low(3b)\n"                         \
                "       jmp r14\n"                                      \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
                "       .balign 4\n"                                    \
                "       .long 1b,4b\n"                                  \
                "       .long 2b,4b\n"                                  \
                ".previous"                                             \
                : "=&r" (err)                                           \
                : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)         \
                : "r14", "memory")
#else
#error no endian defined
#endif
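
/*
 * Note (assumed from the usage above): %H1 and %L1 are GCC operand
 * modifiers selecting the high and low 32-bit halves of the 64-bit
 * operand, so each variant stores the two word halves in the order the
 * target endianness expects.
 */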

extern void __put_user_bad(void);

#define __put_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
          case 1: __put_user_asm(x, ptr, retval, "b"); break;           \
          case 2: __put_user_asm(x, ptr, retval, "h"); break;           \
          case 4: __put_user_asm(x, ptr, retval, ""); break;            \
          case 8: __put_user_u64((__typeof__(*ptr))(x), ptr, retval); break;\
          default: __put_user_bad();                                    \
        }                                                               \
} while (0)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype)                             \
        __asm__ __volatile__(                                           \
                "       .fillinsn\n"                                    \
                "1:     st"itype" %1,@%2\n"                             \
                "       .fillinsn\n"                                    \
                "2:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "       .balign 4\n"                                    \
                "3:     ldi %0,%3\n"                                    \
                "       seth r14,#high(2b)\n"                           \
                "       or3 r14,r14,#low(2b)\n"                         \
                "       jmp r14\n"                                      \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
                "       .balign 4\n"                                    \
                "       .long 1b,3b\n"                                  \
                ".previous"                                             \
                : "=&r" (err)                                           \
                : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)         \
                : "r14", "memory")
/*
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the m32r will not write
 * anything, so this is accurate.
 */

/*
 * Copy To/From Userspace
 */

/* Generic arbitrary sized copy.  */
/* Return the number of bytes NOT copied.  */
#define __copy_user(to, from, size)                                     \
do {                                                                    \
        unsigned long __dst, __src, __c;                                \
        __asm__ __volatile__ (                                          \
                "       mv      r14, %0\n"                              \
                "       or      r14, %1\n"                              \
                "       beq     %0, %1, 9f\n"                           \
                "       beqz    %2, 9f\n"                               \
                "       and3    r14, r14, #3\n"                         \
                "       bnez    r14, 2f\n"                              \
                "       and3    %2, %2, #3\n"                           \
                "       beqz    %3, 2f\n"                               \
                "       addi    %0, #-4         ; word_copy \n"         \
                "       .fillinsn\n"                                    \
                "0:     ld      r14, @%1+\n"                            \
                "       addi    %3, #-1\n"                              \
                "       .fillinsn\n"                                    \
                "1:     st      r14, @+%0\n"                            \
                "       bnez    %3, 0b\n"                               \
                "       beqz    %2, 9f\n"                               \
                "       addi    %0, #4\n"                               \
                "       .fillinsn\n"                                    \
                "2:     ldb     r14, @%1        ; byte_copy \n"         \
                "       .fillinsn\n"                                    \
                "3:     stb     r14, @%0\n"                             \
                "       addi    %1, #1\n"                               \
                "       addi    %2, #-1\n"                              \
                "       addi    %0, #1\n"                               \
                "       bnez    %2, 2b\n"                               \
                "       .fillinsn\n"                                    \
                "9:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "       .balign 4\n"                                    \
                "5:     addi    %3, #1\n"                               \
                "       addi    %1, #-4\n"                              \
                "       .fillinsn\n"                                    \
                "6:     slli    %3, #2\n"                               \
                "       add     %2, %3\n"                               \
                "       addi    %0, #4\n"                               \
                "       .fillinsn\n"                                    \
                "7:     seth    r14, #high(9b)\n"                       \
                "       or3     r14, r14, #low(9b)\n"                   \
                "       jmp     r14\n"                                  \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
                "       .balign 4\n"                                    \
                "       .long 0b,6b\n"                                  \
                "       .long 1b,5b\n"                                  \
                "       .long 2b,9b\n"                                  \
                "       .long 3b,9b\n"                                  \
                ".previous\n"                                           \
                : "=&r" (__dst), "=&r" (__src), "=&r" (size),           \
                  "=&r" (__c)                                           \
                : "0" (to), "1" (from), "2" (size), "3" (size / 4)      \
                : "r14", "memory");                                     \
} while (0)

#define __copy_user_zeroing(to, from, size)                             \
do {                                                                    \
        unsigned long __dst, __src, __c;                                \
        __asm__ __volatile__ (                                          \
                "       mv      r14, %0\n"                              \
                "       or      r14, %1\n"                              \
                "       beq     %0, %1, 9f\n"                           \
                "       beqz    %2, 9f\n"                               \
                "       and3    r14, r14, #3\n"                         \
                "       bnez    r14, 2f\n"                              \
                "       and3    %2, %2, #3\n"                           \
                "       beqz    %3, 2f\n"                               \
                "       addi    %0, #-4         ; word_copy \n"         \
                "       .fillinsn\n"                                    \
                "0:     ld      r14, @%1+\n"                            \
                "       addi    %3, #-1\n"                              \
                "       .fillinsn\n"                                    \
                "1:     st      r14, @+%0\n"                            \
                "       bnez    %3, 0b\n"                               \
                "       beqz    %2, 9f\n"                               \
                "       addi    %0, #4\n"                               \
                "       .fillinsn\n"                                    \
                "2:     ldb     r14, @%1        ; byte_copy \n"         \
                "       .fillinsn\n"                                    \
                "3:     stb     r14, @%0\n"                             \
                "       addi    %1, #1\n"                               \
                "       addi    %2, #-1\n"                              \
                "       addi    %0, #1\n"                               \
                "       bnez    %2, 2b\n"                               \
                "       .fillinsn\n"                                    \
                "9:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "       .balign 4\n"                                    \
                "5:     addi    %3, #1\n"                               \
                "       addi    %1, #-4\n"                              \
                "       .fillinsn\n"                                    \
                "6:     slli    %3, #2\n"                               \
                "       add     %2, %3\n"                               \
                "       addi    %0, #4\n"                               \
                "       .fillinsn\n"                                    \
                "7:     ldi     r14, #0         ; store zero \n"        \
                "       .fillinsn\n"                                    \
                "8:     addi    %2, #-1\n"                              \
                "       stb     r14, @%0        ; ACE? \n"              \
                "       addi    %0, #1\n"                               \
                "       bnez    %2, 8b\n"                               \
                "       seth    r14, #high(9b)\n"                       \
                "       or3     r14, r14, #low(9b)\n"                   \
                "       jmp     r14\n"                                  \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
                "       .balign 4\n"                                    \
                "       .long 0b,6b\n"                                  \
                "       .long 1b,5b\n"                                  \
                "       .long 2b,7b\n"                                  \
                "       .long 3b,7b\n"                                  \
                ".previous\n"                                           \
                : "=&r" (__dst), "=&r" (__src), "=&r" (size),           \
                  "=&r" (__c)                                           \
                : "0" (to), "1" (from), "2" (size), "3" (size / 4)      \
                : "r14", "memory");                                     \
} while (0)


/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */
static inline unsigned long __generic_copy_from_user_nocheck(void *to,
        const void __user *from, unsigned long n)
{
        __copy_user_zeroing(to, from, n);
        return n;
}

static inline unsigned long __generic_copy_to_user_nocheck(void __user *to,
        const void *from, unsigned long n)
{
        __copy_user(to, from, n);
        return n;
}

unsigned long __generic_copy_to_user(void __user *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long);

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n)                     \
        __generic_copy_to_user_nocheck((to), (from), (n))

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n)                       \
({                                                      \
        might_fault();                                  \
        __generic_copy_to_user((to), (from), (n));      \
})
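
/*
 * Example (a sketch; "ubuf", "kbuf" and "len" are hypothetical): treat any
 * partial copy as a fault, as most callers do.
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */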

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n)                   \
        __generic_copy_from_user_nocheck((to), (from), (n))

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n)                     \
({                                                      \
        might_fault();                                  \
        __generic_copy_from_user((to), (from), (n));    \
})
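
/*
 * Example (a sketch; "kbuf", "ubuf" and "len" are hypothetical).  On a
 * partial fault the destination is zero-padded out to "len", so "kbuf"
 * never retains stale kernel data:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */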

long __must_check strncpy_from_user(char *dst, const char __user *src,
                                long count);
long __must_check __strncpy_from_user(char *dst,
                                const char __user *src, long count);

/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long __clear_user(void __user *mem, unsigned long len);

/**
 * clear_user: - Zero a block of memory in user space.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  The address range is checked
 * here; unlike __clear_user(), no prior access_ok() call is required.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long clear_user(void __user *mem, unsigned long len);
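
/*
 * Example (a sketch; "ubuf" and "len" are hypothetical): zero a user
 * buffer and report failure if any byte could not be cleared.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */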

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
long strnlen_user(const char __user *str, long n);
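
/*
 * Example (a sketch; "ustr" and "buf" are hypothetical): bound the scan to
 * the destination buffer size instead of using the unbounded strlen_user().
 *
 *	long len = strnlen_user(ustr, sizeof(buf));
 *
 *	if (len == 0)
 *		return -EFAULT;	// faulted while scanning the string
 */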

#endif /* _ASM_M32R_UACCESS_H */