linux/arch/tile/include/asm/uaccess.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_UACCESS_H
#define _ASM_TILE_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ     0
#define VERIFY_WRITE    1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

#ifndef __tilegx__
/*
 * We could allow mapping all 16 MB at 0xfc000000, but we set up a
 * special hack in arch_setup_additional_pages() to auto-create a mapping
 * for the first 16 KB, and it would seem strange to have different
 * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000.
 */
static inline int is_arch_mappable_range(unsigned long addr,
                                         unsigned long size)
{
        return (addr >= MEM_USER_INTRPT &&
                addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
                size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
}
#define is_arch_mappable_range is_arch_mappable_range
#else
#define is_arch_mappable_range(addr, size) 0
#endif

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
int __range_ok(unsigned long addr, unsigned long size);

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) ({ \
        __chk_user_ptr(addr); \
        likely(__range_ok((unsigned long)(addr), (size)) == 0); \
})

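/*
 * Illustrative usage sketch (not part of the original header; the function
 * and variable names are hypothetical): a typical caller validates the user
 * pointer once with access_ok() and then uses one of the unchecked "__"
 * accessors defined later in this file.
 *
 *    static int example_read_flag(int __user *uptr)
 *    {
 *            int flag;
 *
 *            if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *                    return -EFAULT;
 *            return __get_user(flag, uptr) ? -EFAULT : flag;
 *    }
 */
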
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

/*
 * Support macros for __get_user().
 *
 * Implementation note: The "case 8" logic of casting to the type of
 * the result of subtracting the value from itself is basically a way
 * of keeping all integer types the same, but casting any pointers to
 * ptrdiff_t, i.e. also an integer type.  This way there are no
 * questionable casts seen by the compiler on an ILP32 platform.
 *
 * Note that __get_user() and __put_user() assume proper alignment.
 */

#ifdef __LP64__
#define _ASM_PTR        ".quad"
#define _ASM_ALIGN      ".align 8"
#else
#define _ASM_PTR        ".long"
#define _ASM_ALIGN      ".align 4"
#endif

#define __get_user_asm(OP, x, ptr, ret)                                 \
        asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"              \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "0: { movei %1, 0; movei %0, %3 }\n"               \
                     "j 9f\n"                                           \
                     ".section __ex_table,\"a\"\n"                      \
                     _ASM_ALIGN "\n"                                    \
                     _ASM_PTR " 1b, 0b\n"                               \
                     ".popsection\n"                                    \
                     "9:"                                               \
                     : "=r" (ret), "=r" (x)                             \
                     : "r" (ptr), "i" (-EFAULT))

#ifdef __tilegx__
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
#else
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
#ifdef __LITTLE_ENDIAN
#define __lo32(a, b) a
#define __hi32(a, b) b
#else
#define __lo32(a, b) b
#define __hi32(a, b) a
#endif
#define __get_user_8(x, ptr, ret)                                       \
        ({                                                              \
                unsigned int __a, __b;                                  \
                asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n"       \
                             "2: { lw %2, %2; movei %0, 0 }\n"          \
                             ".pushsection .fixup,\"ax\"\n"             \
                             "0: { movei %1, 0; movei %2, 0 }\n"        \
                             "{ movei %0, %4; j 9f }\n"                 \
                             ".section __ex_table,\"a\"\n"              \
                             ".align 4\n"                               \
                             ".word 1b, 0b\n"                           \
                             ".word 2b, 0b\n"                           \
                             ".popsection\n"                            \
                             "9:"                                       \
                             : "=r" (ret), "=r" (__a), "=&r" (__b)      \
                             : "r" (ptr), "i" (-EFAULT));               \
                (x) = (__typeof(x))(__typeof((x)-(x)))                  \
                        (((u64)__hi32(__a, __b) << 32) |                \
                         __lo32(__a, __b));                             \
        })
#endif

extern int __get_user_bad(void)
  __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 */
#define __get_user(x, ptr)                                              \
        ({                                                              \
                int __ret;                                              \
                __chk_user_ptr(ptr);                                    \
                switch (sizeof(*(ptr))) {                               \
                case 1: __get_user_1(x, ptr, __ret); break;             \
                case 2: __get_user_2(x, ptr, __ret); break;             \
                case 4: __get_user_4(x, ptr, __ret); break;             \
                case 8: __get_user_8(x, ptr, __ret); break;             \
                default: __ret = __get_user_bad(); break;               \
                }                                                       \
                __ret;                                                  \
        })

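/*
 * Illustrative sketch (not from the original header; names are
 * hypothetical): __get_user() is the unchecked variant, so the caller is
 * expected to have validated the pointer with access_ok() already, e.g.
 * once for a whole structure before fetching an individual, naturally
 * aligned field.
 *
 *    struct example_req { u32 cmd; u32 arg; };
 *
 *    static int example_peek_cmd(struct example_req __user *req, u32 *cmd)
 *    {
 *            if (!access_ok(VERIFY_READ, req, sizeof(*req)))
 *                    return -EFAULT;
 *            return __get_user(*cmd, &req->cmd);
 *    }
 */
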
/* Support macros for __put_user(). */

#define __put_user_asm(OP, x, ptr, ret)                                 \
        asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"              \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "0: { movei %0, %3; j 9f }\n"                      \
                     ".section __ex_table,\"a\"\n"                      \
                     _ASM_ALIGN "\n"                                    \
                     _ASM_PTR " 1b, 0b\n"                               \
                     ".popsection\n"                                    \
                     "9:"                                               \
                     : "=r" (ret)                                       \
                     : "r" (ptr), "r" (x), "i" (-EFAULT))

#ifdef __tilegx__
#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
#else
#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
#define __put_user_8(x, ptr, ret)                                       \
        ({                                                              \
                u64 __x = (__typeof((x)-(x)))(x);                       \
                int __lo = (int) __x, __hi = (int) (__x >> 32);         \
                asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n"       \
                             "2: { sw %0, %3; movei %0, 0 }\n"          \
                             ".pushsection .fixup,\"ax\"\n"             \
                             "0: { movei %0, %4; j 9f }\n"              \
                             ".section __ex_table,\"a\"\n"              \
                             ".align 4\n"                               \
                             ".word 1b, 0b\n"                           \
                             ".word 2b, 0b\n"                           \
                             ".popsection\n"                            \
                             "9:"                                       \
                             : "=&r" (ret)                              \
                             : "r" (ptr), "r" (__lo32(__lo, __hi)),     \
                             "r" (__hi32(__lo, __hi)), "i" (-EFAULT));  \
        })
#endif

extern int __put_user_bad(void)
  __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)                                              \
({                                                                      \
        int __ret;                                                      \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __put_user_1(x, ptr, __ret); break;                     \
        case 2: __put_user_2(x, ptr, __ret); break;                     \
        case 4: __put_user_4(x, ptr, __ret); break;                     \
        case 8: __put_user_8(x, ptr, __ret); break;                     \
        default: __ret = __put_user_bad(); break;                       \
        }                                                               \
        __ret;                                                          \
})

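/*
 * Illustrative sketch (not from the original header; names are
 * hypothetical): the mirror-image pattern for __put_user(), again
 * assuming the caller has already run access_ok() on the destination.
 *
 *    static int example_report_status(u32 __user *statusp, u32 status)
 *    {
 *            if (!access_ok(VERIFY_WRITE, statusp, sizeof(*statusp)))
 *                    return -EFAULT;
 *            return __put_user(status, statusp);
 *    }
 */
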
/*
 * The versions of get_user and put_user without initial underscores
 * check the address of their arguments to make sure they are not
 * in kernel space.
 */
#define put_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *__Pu_addr = (ptr);                   \
        access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?    \
                __put_user((x), (__Pu_addr)) :                          \
                -EFAULT;                                                \
})

#define get_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) const __user *__Gu_addr = (ptr);             \
        access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?     \
                __get_user((x), (__Gu_addr)) :                          \
                ((x) = 0, -EFAULT);                                     \
})

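/*
 * Illustrative sketch (not from the original header; the function name is
 * hypothetical): with the checked get_user()/put_user() forms the
 * access_ok() test is folded in, so a simple read-modify-write of a user
 * variable reduces to two calls.
 *
 *    static int example_bump_counter(int __user *ctr)
 *    {
 *            int val;
 *
 *            if (get_user(val, ctr))
 *                    return -EFAULT;
 *            return put_user(val + 1, ctr);
 *    }
 */
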
/**
 * __copy_to_user() - copy data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * An alternate version - __copy_to_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().
 */
extern unsigned long __must_check __copy_to_user_inatomic(
        void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __copy_to_user(to, from, n);
        return n;
}

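/*
 * Illustrative sketch (not from the original header; names are
 * hypothetical): copy_to_user() returns the number of bytes NOT copied,
 * so the usual idiom maps any nonzero result to -EFAULT.
 *
 *    static int example_export(void __user *dst, const void *src, size_t n)
 *    {
 *            return copy_to_user(dst, src, n) ? -EFAULT : 0;
 *    }
 */
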
/**
 * __copy_from_user() - copy data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().  This version
 * does *NOT* pad with zeros.
 */
extern unsigned long __must_check __copy_from_user_inatomic(
        void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
        void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_fault();
        return __copy_from_user_zeroing(to, from, n);
}

static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n))
                n = __copy_from_user(to, from, n);
        else
                memset(to, 0, n);
        return n;
}

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
/*
 * There are still unprovable places in the generic code as of 2.6.34, so this
 * option is not really compatible with -Werror, which is more useful in
 * general.
 */
extern void copy_from_user_overflow(void)
        __compiletime_warning("copy_from_user() size is not provably correct");

static inline unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
                                          unsigned long n)
{
        int sz = __compiletime_object_size(to);

        if (likely(sz == -1 || sz >= n))
                n = _copy_from_user(to, from, n);
        else
                copy_from_user_overflow();

        return n;
}
#else
#define copy_from_user _copy_from_user
#endif

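/*
 * Illustrative sketch (not from the original header; names are
 * hypothetical): copy_from_user() likewise returns the number of uncopied
 * bytes, and on a partial copy the remainder of the kernel buffer has
 * already been zero-padded.
 *
 *    struct example_args { u64 addr; u64 len; };
 *
 *    static int example_import(struct example_args *ka,
 *                              const void __user *uarg)
 *    {
 *            if (copy_from_user(ka, uarg, sizeof(*ka)))
 *                    return -EFAULT;
 *            return 0;
 *    }
 */
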
#ifdef __tilegx__
/**
 * __copy_in_user() - copy data within user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to user space.  Caller must check
 * the specified blocks with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
extern unsigned long __copy_in_user_inatomic(
        void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        return __copy_in_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
                n = __copy_in_user(to, from, n);
        return n;
}
#endif

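/*
 * Illustrative sketch (not from the original header, and only meaningful
 * on tilegx, where copy_in_user() is provided; names are hypothetical):
 * moving data between two user buffers, e.g. in a compat layer.
 *
 *    static int example_user_memmove(void __user *dst,
 *                                    const void __user *src, size_t n)
 *    {
 *            return copy_in_user(dst, src, n) ? -EFAULT : 0;
 *    }
 */
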
/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
extern long strnlen_user_asm(const char __user *str, long n);
static inline long __must_check strnlen_user(const char __user *str, long n)
{
        might_fault();
        return strnlen_user_asm(str, n);
}
#define strlen_user(str) strnlen_user(str, LONG_MAX)

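/*
 * Illustrative sketch (not from the original header; names are
 * hypothetical): measuring a user-supplied string before allocating a
 * kernel copy.  A zero return indicates a fault; otherwise the count
 * includes the terminating NUL.
 *
 *    static long example_user_strlen(const char __user *ustr)
 *    {
 *            long len = strnlen_user(ustr, PAGE_SIZE);
 *
 *            return len ? len : -EFAULT;
 *    }
 */
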
/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
extern long strncpy_from_user_asm(char *dst, const char __user *src, long);
static inline long __must_check __strncpy_from_user(
        char *dst, const char __user *src, long count)
{
        might_fault();
        return strncpy_from_user_asm(dst, src, count);
}
static inline long __must_check strncpy_from_user(
        char *dst, const char __user *src, long count)
{
        if (access_ok(VERIFY_READ, src, 1))
                return __strncpy_from_user(dst, src, count);
        return -EFAULT;
}

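/*
 * Illustrative sketch (not from the original header; names are
 * hypothetical): copying a short user string into a fixed kernel buffer
 * and forcing NUL termination, since a string of length >= the buffer
 * size is truncated without a trailing NUL.
 *
 *    static int example_get_name(char *buf, size_t bufsize,
 *                                const char __user *uname)
 *    {
 *            long len = strncpy_from_user(buf, uname, bufsize);
 *
 *            if (len < 0)
 *                    return len;
 *            buf[bufsize - 1] = '\0';
 *            return 0;
 *    }
 */
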
/**
 * clear_user: - Zero a block of memory in user space.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __clear_user(
        void __user *mem, unsigned long len)
{
        might_fault();
        return clear_user_asm(mem, len);
}
static inline unsigned long __must_check clear_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __clear_user(mem, len);
        return len;
}

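/*
 * Illustrative sketch (not from the original header; names are
 * hypothetical): zeroing the unused tail of a user buffer, e.g. to avoid
 * exposing stale data past the bytes actually produced.
 *
 *    static int example_zero_tail(void __user *buf, size_t used, size_t total)
 *    {
 *            if (used >= total)
 *                    return 0;
 *            return clear_user((char __user *)buf + used, total - used) ?
 *                    -EFAULT : 0;
 *    }
 */
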
/**
 * flush_user: - Flush a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to flush.
 *
 * Returns number of bytes that could not be flushed.
 * On success, this will be zero.
 */
extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __flush_user(
        void __user *mem, unsigned long len)
{
        int retval;

        might_fault();
        retval = flush_user_asm(mem, len);
        mb_incoherent();
        return retval;
}

static inline unsigned long __must_check flush_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __flush_user(mem, len);
        return len;
}

/**
 * finv_user: - Flush-inval a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be flush-invalidated.
 * On success, this will be zero.
 */
extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __finv_user(
        void __user *mem, unsigned long len)
{
        int retval;

        might_fault();
        retval = finv_user_asm(mem, len);
        mb_incoherent();
        return retval;
}
static inline unsigned long __must_check finv_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __finv_user(mem, len);
        return len;
}

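/*
 * Illustrative sketch (not from the original header; names are
 * hypothetical): flush_user() and finv_user() are cache-maintenance
 * helpers for user mappings, used e.g. when a buffer the kernel has
 * written will next be accessed incoherently.  Like clear_user(), they
 * return the number of bytes that could not be processed.
 *
 *    static int example_sync_user_buf(void __user *buf, unsigned long len)
 *    {
 *            return flush_user(buf, len) ? -EFAULT : 0;
 *    }
 */
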
#endif /* _ASM_TILE_UACCESS_H */