linux/arch/tile/include/asm/uaccess.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_UACCESS_H
#define _ASM_TILE_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ     0
#define VERIFY_WRITE    1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * if get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

#ifndef __tilegx__
/*
 * We could allow mapping all 16 MB at 0xfc000000, but we set up a
 * special hack in arch_setup_additional_pages() to auto-create a mapping
 * for the first 16 KB, and it would seem strange to have different
 * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000.
 */
static inline int is_arch_mappable_range(unsigned long addr,
                                         unsigned long size)
{
        return (addr >= MEM_USER_INTRPT &&
                addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
                size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
}
#define is_arch_mappable_range is_arch_mappable_range
#else
#define is_arch_mappable_range(addr, size) 0
#endif

/*
 * Note that using this definition ignores is_arch_mappable_range(),
 * so on tilepro, code that uses user_addr_max() is constrained not
 * to reference the tilepro user-interrupt region.
 */
#define user_addr_max() (current_thread_info()->addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
int __range_ok(unsigned long addr, unsigned long size);

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) ({ \
        __chk_user_ptr(addr); \
        likely(__range_ok((unsigned long)(addr), (size)) == 0); \
})

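/*
 * Usage sketch (illustrative only, not part of this header): a caller
 * validates the whole block once, then may use the cheaper __-prefixed
 * accessors on it.  read_flags() and its arguments are hypothetical:
 *
 *      int read_flags(unsigned int __user *uptr, unsigned int *flags)
 *      {
 *              if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *                      return -EFAULT;
 *              return __get_user(*flags, uptr);
 *      }
 */
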
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

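/*
 * Conceptual sketch of how a fault handler consults this table (a
 * simplified illustration using the generic search_exception_tables()
 * helper; not necessarily the exact tile implementation):
 *
 *      const struct exception_table_entry *e;
 *
 *      e = search_exception_tables(regs->pc);
 *      if (e) {
 *              regs->pc = e->fixup;    // resume at the fixup stub
 *              return 1;               // fault was handled
 *      }
 *      return 0;                       // genuine kernel fault
 */
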
/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/*
 * Support macros for __get_user().
 * Note that __get_user() and __put_user() assume proper alignment.
 */

#ifdef __LP64__
#define _ASM_PTR        ".quad"
#define _ASM_ALIGN      ".align 8"
#else
#define _ASM_PTR        ".long"
#define _ASM_ALIGN      ".align 4"
#endif

#define __get_user_asm(OP, x, ptr, ret)                                 \
        asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"              \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "0: { movei %1, 0; movei %0, %3 }\n"               \
                     "j 9f\n"                                           \
                     ".section __ex_table,\"a\"\n"                      \
                     _ASM_ALIGN "\n"                                    \
                     _ASM_PTR " 1b, 0b\n"                               \
                     ".popsection\n"                                    \
                     "9:"                                               \
                     : "=r" (ret), "=r" (x)                             \
                     : "r" (ptr), "i" (-EFAULT))

#ifdef __tilegx__
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
#else
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
#ifdef __LITTLE_ENDIAN
#define __lo32(a, b) a
#define __hi32(a, b) b
#else
#define __lo32(a, b) b
#define __hi32(a, b) a
#endif
#define __get_user_8(x, ptr, ret)                                       \
        ({                                                              \
                unsigned int __a, __b;                                  \
                asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n"       \
                             "2: { lw %2, %2; movei %0, 0 }\n"          \
                             ".pushsection .fixup,\"ax\"\n"             \
                             "0: { movei %1, 0; movei %2, 0 }\n"        \
                             "{ movei %0, %4; j 9f }\n"                 \
                             ".section __ex_table,\"a\"\n"              \
                             ".align 4\n"                               \
                             ".word 1b, 0b\n"                           \
                             ".word 2b, 0b\n"                           \
                             ".popsection\n"                            \
                             "9:"                                       \
                             : "=r" (ret), "=r" (__a), "=&r" (__b)      \
                             : "r" (ptr), "i" (-EFAULT));               \
                (x) = (__force __typeof(x))(__inttype(x))               \
                        (((u64)__hi32(__a, __b) << 32) |                \
                         __lo32(__a, __b));                             \
        })
#endif

extern int __get_user_bad(void)
  __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 */
#define __get_user(x, ptr)                                              \
        ({                                                              \
                int __ret;                                              \
                typeof(x) _x;                                           \
                __chk_user_ptr(ptr);                                    \
                switch (sizeof(*(ptr))) {                               \
                case 1: __get_user_1(_x, ptr, __ret); break;            \
                case 2: __get_user_2(_x, ptr, __ret); break;            \
                case 4: __get_user_4(_x, ptr, __ret); break;            \
                case 8: __get_user_8(_x, ptr, __ret); break;            \
                default: __ret = __get_user_bad(); break;               \
                }                                                       \
                (x) = (typeof(*(ptr))) _x;                              \
                __ret;                                                  \
        })

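/*
 * Usage sketch (illustrative only): after an access_ok() check, fetch
 * a scalar with __get_user().  "uval" and "uptr" are hypothetical:
 *
 *      u32 uval;
 *
 *      if (!access_ok(VERIFY_READ, uptr, sizeof(u32)))
 *              return -EFAULT;
 *      if (__get_user(uval, uptr))
 *              return -EFAULT;         // faulted; uval was zeroed
 */
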
/* Support macros for __put_user(). */

#define __put_user_asm(OP, x, ptr, ret)                                 \
        asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"              \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "0: { movei %0, %3; j 9f }\n"                      \
                     ".section __ex_table,\"a\"\n"                      \
                     _ASM_ALIGN "\n"                                    \
                     _ASM_PTR " 1b, 0b\n"                               \
                     ".popsection\n"                                    \
                     "9:"                                               \
                     : "=r" (ret)                                       \
                     : "r" (ptr), "r" (x), "i" (-EFAULT))

#ifdef __tilegx__
#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
#else
#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
#define __put_user_8(x, ptr, ret)                                       \
        ({                                                              \
                u64 __x = (__force __inttype(x))(x);                    \
                int __lo = (int) __x, __hi = (int) (__x >> 32);         \
                asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n"       \
                             "2: { sw %0, %3; movei %0, 0 }\n"          \
                             ".pushsection .fixup,\"ax\"\n"             \
                             "0: { movei %0, %4; j 9f }\n"              \
                             ".section __ex_table,\"a\"\n"              \
                             ".align 4\n"                               \
                             ".word 1b, 0b\n"                           \
                             ".word 2b, 0b\n"                           \
                             ".popsection\n"                            \
                             "9:"                                       \
                             : "=&r" (ret)                              \
                             : "r" (ptr), "r" (__lo32(__lo, __hi)),     \
                             "r" (__hi32(__lo, __hi)), "i" (-EFAULT));  \
        })
#endif

extern int __put_user_bad(void)
  __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)                                              \
({                                                                      \
        int __ret;                                                      \
        typeof(*(ptr)) _x = (x);                                        \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __put_user_1(_x, ptr, __ret); break;                    \
        case 2: __put_user_2(_x, ptr, __ret); break;                    \
        case 4: __put_user_4(_x, ptr, __ret); break;                    \
        case 8: __put_user_8(_x, ptr, __ret); break;                    \
        default: __ret = __put_user_bad(); break;                       \
        }                                                               \
        __ret;                                                          \
})

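/*
 * Usage sketch (illustrative only; "uptr" is a hypothetical,
 * already-validated user pointer):
 *
 *      if (__put_user(42, uptr))
 *              return -EFAULT;         // store faulted
 */
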
/*
 * The versions of get_user and put_user without initial underscores
 * check the address of their arguments to make sure they are not
 * in kernel space.
 */
#define put_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *__Pu_addr = (ptr);                   \
        access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?    \
                __put_user((x), (__Pu_addr)) :                          \
                -EFAULT;                                                \
})

#define get_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) const __user *__Gu_addr = (ptr);             \
        access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?     \
                __get_user((x), (__Gu_addr)) :                          \
                ((x) = 0, -EFAULT);                                     \
})

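/*
 * Usage sketch (illustrative only): the checked forms are the normal
 * choice in, e.g., ioctl handlers.  "argp" is a hypothetical
 * "int __user *" argument:
 *
 *      int val;
 *
 *      if (get_user(val, argp))        // checks the address itself
 *              return -EFAULT;
 *      val *= 2;
 *      return put_user(val, argp);
 */
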
/**
 * __copy_to_user() - copy data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * An alternate version - __copy_to_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().
 */
extern unsigned long __must_check __copy_to_user_inatomic(
        void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __copy_to_user(to, from, n);
        return n;
}

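/*
 * Usage sketch (illustrative only): a character-device read() handler
 * might publish a kernel buffer this way.  "my_buf" and "my_buf_len"
 * are hypothetical:
 *
 *      static ssize_t my_read(struct file *f, char __user *buf,
 *                             size_t count, loff_t *ppos)
 *      {
 *              if (count > my_buf_len)
 *                      count = my_buf_len;
 *              if (copy_to_user(buf, my_buf, count))
 *                      return -EFAULT; // some bytes were not copied
 *              return count;
 *      }
 */
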
/**
 * __copy_from_user() - copy data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().  This version
 * does *NOT* pad with zeros.
 */
extern unsigned long __must_check __copy_from_user_inatomic(
        void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
        void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_fault();
        return __copy_from_user_zeroing(to, from, n);
}

static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n))
                n = __copy_from_user(to, from, n);
        else
                memset(to, 0, n);
        return n;
}

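/*
 * Sketch of the "inatomic" calling convention mentioned above
 * (illustrative only): the caller disables pagefaults and must cope
 * with a nonzero (partial-copy) return instead of sleeping:
 *
 *      pagefault_disable();
 *      ret = __copy_from_user_inatomic(kbuf, ubuf, len);
 *      pagefault_enable();
 *      if (ret)
 *              ...   // fall back to a sleeping copy, or fail
 */
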
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static inline unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
                                          unsigned long n)
{
        int sz = __compiletime_object_size(to);

        if (likely(sz == -1 || sz >= n))
                n = _copy_from_user(to, from, n);
        else if (!__builtin_constant_p(n))
                copy_user_overflow(sz, n);
        else
                __bad_copy_user();

        return n;
}

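/*
 * Usage sketch (illustrative only): a write() handler pulling a
 * fixed-size command structure from user space.  "struct my_cmd" is
 * hypothetical:
 *
 *      struct my_cmd cmd;
 *
 *      if (count != sizeof(cmd))
 *              return -EINVAL;
 *      if (copy_from_user(&cmd, buf, sizeof(cmd)))
 *              return -EFAULT; // partial copy; kernel copy is zero-padded
 */
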
#ifdef __tilegx__
/**
 * __copy_in_user() - copy data within user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to user space.  Caller must check
 * the specified blocks with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
extern unsigned long __copy_in_user_inatomic(
        void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        return __copy_in_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
                n = __copy_in_user(to, from, n);
        return n;
}
#endif

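/*
 * Usage sketch (illustrative only): moving bytes between two user
 * buffers, e.g. in a compat path.  "udst", "usrc", and "len" are
 * hypothetical:
 *
 *      if (copy_in_user(udst, usrc, len))
 *              return -EFAULT;
 */
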
extern long strnlen_user(const char __user *str, long n);
extern long strlen_user(const char __user *str);
extern long strncpy_from_user(char *dst, const char __user *src, long n);

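/*
 * Usage sketch (illustrative only): copying a NUL-terminated name from
 * user space into a fixed kernel buffer.  "uname" is hypothetical:
 *
 *      char name[32];
 *      long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *      if (len < 0)
 *              return len;             // -EFAULT
 *      if (len == sizeof(name))
 *              return -ENAMETOOLONG;   // no NUL within the buffer
 */
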
/**
 * clear_user: - Zero a block of memory in user space.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __clear_user(
        void __user *mem, unsigned long len)
{
        might_fault();
        return clear_user_asm(mem, len);
}
static inline unsigned long __must_check clear_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __clear_user(mem, len);
        return len;
}

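/*
 * Usage sketch (illustrative only): zeroing the tail of a user buffer
 * after a short read, so no stale data leaks to user space.  "buf",
 * "copied", and "count" are hypothetical:
 *
 *      if (clear_user(buf + copied, count - copied))
 *              return -EFAULT;
 */
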
/**
 * flush_user: - Flush a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to flush.
 *
 * Returns number of bytes that could not be flushed.
 * On success, this will be zero.
 */
extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __flush_user(
        void __user *mem, unsigned long len)
{
        int retval;

        might_fault();
        retval = flush_user_asm(mem, len);
        mb_incoherent();
        return retval;
}

static inline unsigned long __must_check flush_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __flush_user(mem, len);
        return len;
}

/**
 * finv_user: - Flush-inval a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be flush-invalidated.
 * On success, this will be zero.
 */
extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __finv_user(
        void __user *mem, unsigned long len)
{
        int retval;

        might_fault();
        retval = finv_user_asm(mem, len);
        mb_incoherent();
        return retval;
}

static inline unsigned long __must_check finv_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __finv_user(mem, len);
        return len;
}

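/*
 * Usage sketch for the two cache operations above (illustrative only;
 * "ubuf" is a hypothetical user buffer shared with a device):
 *
 *      if (flush_user(ubuf, len))      // push dirty lines to memory
 *              return -EFAULT;
 *      ...
 *      if (finv_user(ubuf, len))       // flush and discard cached copies
 *              return -EFAULT;
 */
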
#endif /* _ASM_TILE_UACCESS_H */