linux/arch/tile/include/asm/uaccess.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_UACCESS_H
#define _ASM_TILE_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ     0
#define VERIFY_WRITE    1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

#ifndef __tilegx__
/*
 * We could allow mapping all 16 MB at 0xfc000000, but we set up a
 * special hack in arch_setup_additional_pages() to auto-create a mapping
 * for the first 16 KB, and it would seem strange to have different
 * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000.
 */
static inline int is_arch_mappable_range(unsigned long addr,
                                         unsigned long size)
{
        return (addr >= MEM_USER_INTRPT &&
                addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
                size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
}
#define is_arch_mappable_range is_arch_mappable_range
#else
#define is_arch_mappable_range(addr, size) 0
#endif

/*
 * Test whether a block of memory is a valid user space address.
 * Returns zero if the range is valid, nonzero otherwise.
 */
int __range_ok(unsigned long addr, unsigned long size);

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) ({ \
        __chk_user_ptr(addr); \
        likely(__range_ok((unsigned long)(addr), (size)) == 0); \
})

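/*
 * Example (an illustrative sketch, not part of this header's API;
 * "ubuf", "kbuf", and "len" are hypothetical syscall arguments):
 * validate a user buffer once, then use an unchecked helper on it.
 *
 *        if (!access_ok(VERIFY_WRITE, ubuf, len))
 *                return -EFAULT;
 *        if (__copy_to_user(ubuf, kbuf, len))
 *                return -EFAULT;
 */
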
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

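/*
 * For example (a sketch of the intent): on a 32-bit tilepro build,
 * __inttype(u8) and __inttype(u32) are both unsigned long, while
 * __inttype(u64) is unsigned long long, so a cast through
 * __inttype(x) widens but never truncates the value.
 */
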
/*
 * Support macros for __get_user().
 * Note that __get_user() and __put_user() assume proper alignment.
 */

#ifdef __LP64__
#define _ASM_PTR        ".quad"
#define _ASM_ALIGN      ".align 8"
#else
#define _ASM_PTR        ".long"
#define _ASM_ALIGN      ".align 4"
#endif

#define __get_user_asm(OP, x, ptr, ret)                                 \
        asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"              \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "0: { movei %1, 0; movei %0, %3 }\n"               \
                     "j 9f\n"                                           \
                     ".section __ex_table,\"a\"\n"                      \
                     _ASM_ALIGN "\n"                                    \
                     _ASM_PTR " 1b, 0b\n"                               \
                     ".popsection\n"                                    \
                     "9:"                                               \
                     : "=r" (ret), "=r" (x)                             \
                     : "r" (ptr), "i" (-EFAULT))

#ifdef __tilegx__
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
#else
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
#ifdef __LITTLE_ENDIAN
#define __lo32(a, b) a
#define __hi32(a, b) b
#else
#define __lo32(a, b) b
#define __hi32(a, b) a
#endif
#define __get_user_8(x, ptr, ret)                                       \
        ({                                                              \
                unsigned int __a, __b;                                  \
                asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n"       \
                             "2: { lw %2, %2; movei %0, 0 }\n"          \
                             ".pushsection .fixup,\"ax\"\n"             \
                             "0: { movei %1, 0; movei %2, 0 }\n"        \
                             "{ movei %0, %4; j 9f }\n"                 \
                             ".section __ex_table,\"a\"\n"              \
                             ".align 4\n"                               \
                             ".word 1b, 0b\n"                           \
                             ".word 2b, 0b\n"                           \
                             ".popsection\n"                            \
                             "9:"                                       \
                             : "=r" (ret), "=r" (__a), "=&r" (__b)      \
                             : "r" (ptr), "i" (-EFAULT));               \
                (x) = (__force __typeof(x))(__inttype(x))               \
                        (((u64)__hi32(__a, __b) << 32) |                \
                         __lo32(__a, __b));                             \
        })
#endif

extern int __get_user_bad(void)
  __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 */
#define __get_user(x, ptr)                                              \
        ({                                                              \
                int __ret;                                              \
                typeof(x) _x;                                           \
                __chk_user_ptr(ptr);                                    \
                switch (sizeof(*(ptr))) {                               \
                case 1: __get_user_1(_x, ptr, __ret); break;            \
                case 2: __get_user_2(_x, ptr, __ret); break;            \
                case 4: __get_user_4(_x, ptr, __ret); break;            \
                case 8: __get_user_8(_x, ptr, __ret); break;            \
                default: __ret = __get_user_bad(); break;               \
                }                                                       \
                (x) = (typeof(*(ptr))) _x;                              \
                __ret;                                                  \
        })

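/*
 * Example (an illustrative sketch; "uptr" is a hypothetical
 * "int __user *" already checked with access_ok()):
 *
 *        int val;
 *        if (__get_user(val, uptr))
 *                return -EFAULT;
 *
 * On success val holds the user-space int; on a fault it is zeroed,
 * per the contract documented above.
 */
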
/* Support macros for __put_user(). */

#define __put_user_asm(OP, x, ptr, ret)                                 \
        asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"              \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "0: { movei %0, %3; j 9f }\n"                      \
                     ".section __ex_table,\"a\"\n"                      \
                     _ASM_ALIGN "\n"                                    \
                     _ASM_PTR " 1b, 0b\n"                               \
                     ".popsection\n"                                    \
                     "9:"                                               \
                     : "=r" (ret)                                       \
                     : "r" (ptr), "r" (x), "i" (-EFAULT))

#ifdef __tilegx__
#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
#else
#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
#define __put_user_8(x, ptr, ret)                                       \
        ({                                                              \
                u64 __x = (__force __inttype(x))(x);                    \
                int __lo = (int) __x, __hi = (int) (__x >> 32);         \
                asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n"       \
                             "2: { sw %0, %3; movei %0, 0 }\n"          \
                             ".pushsection .fixup,\"ax\"\n"             \
                             "0: { movei %0, %4; j 9f }\n"              \
                             ".section __ex_table,\"a\"\n"              \
                             ".align 4\n"                               \
                             ".word 1b, 0b\n"                           \
                             ".word 2b, 0b\n"                           \
                             ".popsection\n"                            \
                             "9:"                                       \
                             : "=&r" (ret)                              \
                             : "r" (ptr), "r" (__lo32(__lo, __hi)),     \
                               "r" (__hi32(__lo, __hi)), "i" (-EFAULT)); \
        })
#endif

extern int __put_user_bad(void)
  __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)                                              \
({                                                                      \
        int __ret;                                                      \
        typeof(*(ptr)) _x = (x);                                        \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __put_user_1(_x, ptr, __ret); break;                    \
        case 2: __put_user_2(_x, ptr, __ret); break;                    \
        case 4: __put_user_4(_x, ptr, __ret); break;                    \
        case 8: __put_user_8(_x, ptr, __ret); break;                    \
        default: __ret = __put_user_bad(); break;                       \
        }                                                               \
        __ret;                                                          \
})

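/*
 * Example (an illustrative sketch; "uptr" is a hypothetical
 * access_ok()-checked "unsigned long __user *"):
 *
 *        if (__put_user(jiffies, uptr))
 *                return -EFAULT;
 */
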
/*
 * The versions of get_user and put_user without initial underscores
 * check the address of their arguments to make sure they are not
 * in kernel space.
 */
#define put_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *__Pu_addr = (ptr);                   \
        access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?    \
                __put_user((x), (__Pu_addr)) :                          \
                -EFAULT;                                                \
})

#define get_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) const __user *__Gu_addr = (ptr);             \
        access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?     \
                __get_user((x), (__Gu_addr)) :                          \
                ((x) = 0, -EFAULT);                                     \
})

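/*
 * Example (an illustrative sketch; "uaddr" is a hypothetical,
 * unchecked "int __user *" straight from userspace): the checked
 * forms fold the access_ok() test into the access itself.
 *
 *        int val;
 *        if (get_user(val, uaddr))
 *                return -EFAULT;
 *        if (put_user(val + 1, uaddr))
 *                return -EFAULT;
 */
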
/**
 * __copy_to_user() - copy data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * An alternate version - __copy_to_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().
 */
extern unsigned long __must_check __copy_to_user_inatomic(
        void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __copy_to_user(to, from, n);
        return n;
}

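/*
 * Example (an illustrative sketch; "ubuf" is a hypothetical user
 * pointer and "kbuf" a kernel array): the checked copy returns the
 * number of bytes NOT copied, so any nonzero result is a fault.
 *
 *        if (copy_to_user(ubuf, kbuf, sizeof(kbuf)))
 *                return -EFAULT;
 */
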
/**
 * __copy_from_user() - copy data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().  This version
 * does *NOT* pad with zeros.
 */
extern unsigned long __must_check __copy_from_user_inatomic(
        void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
        void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_fault();
        return __copy_from_user_zeroing(to, from, n);
}

static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n))
                n = __copy_from_user(to, from, n);
        else
                memset(to, 0, n);
        return n;
}

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
/*
 * There are still unprovable places in the generic code as of 2.6.34, so this
 * option is not really compatible with -Werror, which is more useful in
 * general.
 */
extern void copy_from_user_overflow(void)
        __compiletime_warning("copy_from_user() size is not provably correct");

static inline unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
                                          unsigned long n)
{
        int sz = __compiletime_object_size(to);

        if (likely(sz == -1 || sz >= n))
                n = _copy_from_user(to, from, n);
        else
                copy_from_user_overflow();

        return n;
}
#else
#define copy_from_user _copy_from_user
#endif

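/*
 * Example (an illustrative sketch; "struct foo" and "ubuf" are
 * hypothetical): on a partial fault the unread tail of the kernel
 * buffer has already been zero-padded by the zeroing variant.
 *
 *        struct foo kbuf;
 *        if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *                return -EFAULT;
 */
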
#ifdef __tilegx__
/**
 * __copy_in_user() - copy data within user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to user space.  Caller must check
 * the specified blocks with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
extern unsigned long __copy_in_user_inatomic(
        void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        return __copy_in_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
                n = __copy_in_user(to, from, n);
        return n;
}
#endif

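/*
 * Example (an illustrative sketch, tilegx-only; "udst" and "usrc"
 * are hypothetical user pointers): the checked form validates both
 * blocks itself.
 *
 *        if (copy_in_user(udst, usrc, len))
 *                return -EFAULT;
 */
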
/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
extern long strnlen_user_asm(const char __user *str, long n);
static inline long __must_check strnlen_user(const char __user *str, long n)
{
        might_fault();
        return strnlen_user_asm(str, n);
}
#define strlen_user(str) strnlen_user(str, LONG_MAX)

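/*
 * Example (an illustrative sketch; "ustr" is a hypothetical user
 * pointer): bound the scan by the buffer we intend to allocate.
 *
 *        long len = strnlen_user(ustr, PATH_MAX);
 *        if (len == 0)
 *                return -EFAULT;
 *
 * A zero return means the scan faulted; otherwise len includes the
 * terminating NUL.
 */
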
/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
extern long strncpy_from_user_asm(char *dst, const char __user *src, long);
static inline long __must_check __strncpy_from_user(
        char *dst, const char __user *src, long count)
{
        might_fault();
        return strncpy_from_user_asm(dst, src, count);
}
static inline long __must_check strncpy_from_user(
        char *dst, const char __user *src, long count)
{
        if (access_ok(VERIFY_READ, src, 1))
                return __strncpy_from_user(dst, src, count);
        return -EFAULT;
}

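/*
 * Example (an illustrative sketch; "ustr" is a hypothetical user
 * pointer): the result must be checked both for a fault and for
 * truncation, since a full buffer may lack the trailing NUL.
 *
 *        char buf[64];
 *        long len = strncpy_from_user(buf, ustr, sizeof(buf));
 *        if (len < 0)
 *                return len;
 *        if (len == sizeof(buf))
 *                return -ENAMETOOLONG;
 */
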
/**
 * clear_user: - Zero a block of memory in user space.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __clear_user(
        void __user *mem, unsigned long len)
{
        might_fault();
        return clear_user_asm(mem, len);
}
static inline unsigned long __must_check clear_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __clear_user(mem, len);
        return len;
}

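/*
 * Example (an illustrative sketch; "ubuf" is a hypothetical
 * "char __user *", "used" and "len" hypothetical sizes): zero the
 * tail of a partially-filled user buffer.
 *
 *        if (clear_user(ubuf + used, len - used))
 *                return -EFAULT;
 */
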
/**
 * flush_user: - Flush a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to flush.
 *
 * Returns number of bytes that could not be flushed.
 * On success, this will be zero.
 */
extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __flush_user(
        void __user *mem, unsigned long len)
{
        int retval;

        might_fault();
        retval = flush_user_asm(mem, len);
        mb_incoherent();
        return retval;
}

static inline unsigned long __must_check flush_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __flush_user(mem, len);
        return len;
}

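/*
 * Example (an illustrative sketch; "ubuf" and "len" are
 * hypothetical): one plausible use is pushing user-visible data out
 * of the cache before a non-coherent observer reads the memory.
 *
 *        if (flush_user(ubuf, len))
 *                return -EFAULT;
 */
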
/**
 * finv_user: - Flush-inval a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be flush-invalidated.
 * On success, this will be zero.
 */
extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __finv_user(
        void __user *mem, unsigned long len)
{
        int retval;

        might_fault();
        retval = finv_user_asm(mem, len);
        mb_incoherent();
        return retval;
}
static inline unsigned long __must_check finv_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __finv_user(mem, len);
        return len;
}

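/*
 * Example (an illustrative sketch; "ubuf" and "len" are
 * hypothetical): one plausible use is flushing and invalidating the
 * range before fresh data is written into the user buffer from
 * outside the cache hierarchy.
 *
 *        if (finv_user(ubuf, len))
 *                return -EFAULT;
 */
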
#endif /* _ASM_TILE_UACCESS_H */