linux/arch/tile/include/asm/uaccess.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_UACCESS_H
#define _ASM_TILE_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

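/*
 * Illustrative sketch (not part of the original header): the classic
 * pattern for temporarily widening the address limit so that the
 * user-access helpers below will accept a kernel-space pointer.  The
 * function name is hypothetical; only get_fs()/set_fs() and KERNEL_DS
 * come from this file.
 *
 *      static int example_read_kernel_word(unsigned int *kptr,
 *                                          unsigned int *val)
 *      {
 *              mm_segment_t old_fs = get_fs();
 *              int ret;
 *
 *              set_fs(KERNEL_DS);      // bypass address-limit checking
 *              ret = get_user(*val, (__force unsigned int __user *)kptr);
 *              set_fs(old_fs);         // always restore the old limit
 *              return ret;
 *      }
 */
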
#ifndef __tilegx__
/*
 * We could allow mapping all 16 MB at 0xfc000000, but we set up a
 * special hack in arch_setup_additional_pages() to auto-create a mapping
 * for the first 16 KB, and it would seem strange to have different
 * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000.
 */
static inline int is_arch_mappable_range(unsigned long addr,
                                         unsigned long size)
{
        return (addr >= MEM_USER_INTRPT &&
                addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
                size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
}
#define is_arch_mappable_range is_arch_mappable_range
#else
#define is_arch_mappable_range(addr, size) 0
#endif

/*
 * Note that using this definition ignores is_arch_mappable_range(),
 * so on tilepro, code that uses user_addr_max() is constrained not
 * to reference the tilepro user-interrupt region.
 */
#define user_addr_max() (current_thread_info()->addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
int __range_ok(unsigned long addr, unsigned long size);

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) ({ \
        __chk_user_ptr(addr); \
        likely(__range_ok((unsigned long)(addr), (size)) == 0); \
})
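
/*
 * Illustrative sketch (not part of the original header): a typical
 * caller checks the whole user buffer once with access_ok() and then
 * uses the cheaper double-underscore accessors.  The function and
 * argument names are hypothetical.
 *
 *      static int example_read_pair(const int __user *uptr, int *a, int *b)
 *      {
 *              if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
 *                      return -EFAULT;
 *              if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */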

#include <asm/extable.h>

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

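/*
 * Illustrative compile-time checks (not part of the original header)
 * showing what __inttype() selects on a 32-bit (tilepro) build, where
 * sizeof(long) == 4: a u32 maps to unsigned long, a u64 to unsigned
 * long long.  BUILD_BUG_ON() comes from <linux/bug.h>.
 *
 *      BUILD_BUG_ON(!__builtin_types_compatible_p(__inttype((u32)0),
 *                                                 unsigned long));
 *      BUILD_BUG_ON(!__builtin_types_compatible_p(__inttype((u64)0),
 *                                                 unsigned long long));
 */
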
/*
 * Support macros for __get_user().
 * Note that __get_user() and __put_user() assume proper alignment.
 */

#ifdef __LP64__
#define _ASM_PTR        ".quad"
#define _ASM_ALIGN      ".align 8"
#else
#define _ASM_PTR        ".long"
#define _ASM_ALIGN      ".align 4"
#endif

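/*
 * How the asm fragments below recover from a faulting user access: the
 * load at local label "1:" gets an __ex_table entry pairing its address
 * with the fixup code at label "0:" in the .fixup section.  If the
 * access faults, the trap handler looks up the faulting PC in
 * __ex_table and jumps to the fixup, which zeroes the destination,
 * sets the return value to -EFAULT, and resumes at label "9:".
 */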
#define __get_user_asm(OP, x, ptr, ret)                                 \
        asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"              \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "0: { movei %1, 0; movei %0, %3 }\n"               \
                     "j 9f\n"                                           \
                     ".section __ex_table,\"a\"\n"                      \
                     _ASM_ALIGN "\n"                                    \
                     _ASM_PTR " 1b, 0b\n"                               \
                     ".popsection\n"                                    \
                     "9:"                                               \
                     : "=r" (ret), "=r" (x)                             \
                     : "r" (ptr), "i" (-EFAULT))

#ifdef __tilegx__
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
#else
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
#ifdef __LITTLE_ENDIAN
#define __lo32(a, b) a
#define __hi32(a, b) b
#else
#define __lo32(a, b) b
#define __hi32(a, b) a
#endif
#define __get_user_8(x, ptr, ret)                                       \
        ({                                                              \
                unsigned int __a, __b;                                  \
                asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n"       \
                             "2: { lw %2, %2; movei %0, 0 }\n"          \
                             ".pushsection .fixup,\"ax\"\n"             \
                             "0: { movei %1, 0; movei %2, 0 }\n"        \
                             "{ movei %0, %4; j 9f }\n"                 \
                             ".section __ex_table,\"a\"\n"              \
                             ".align 4\n"                               \
                             ".word 1b, 0b\n"                           \
                             ".word 2b, 0b\n"                           \
                             ".popsection\n"                            \
                             "9:"                                       \
                             : "=r" (ret), "=r" (__a), "=&r" (__b)      \
                             : "r" (ptr), "i" (-EFAULT));               \
                (x) = (__force __typeof(x))(__inttype(x))               \
                        (((u64)__hi32(__a, __b) << 32) |                \
                         __lo32(__a, __b));                             \
        })
#endif

extern int __get_user_bad(void)
  __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 */
#define __get_user(x, ptr)                                              \
        ({                                                              \
                int __ret;                                              \
                typeof(x) _x;                                           \
                __chk_user_ptr(ptr);                                    \
                switch (sizeof(*(ptr))) {                               \
                case 1: __get_user_1(_x, ptr, __ret); break;            \
                case 2: __get_user_2(_x, ptr, __ret); break;            \
                case 4: __get_user_4(_x, ptr, __ret); break;            \
                case 8: __get_user_8(_x, ptr, __ret); break;            \
                default: __ret = __get_user_bad(); break;               \
                }                                                       \
                (x) = (typeof(*(ptr))) _x;                              \
                __ret;                                                  \
        })
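
/*
 * Illustrative sketch (not part of the original header): __get_user()
 * after an explicit access_ok() check, e.g. when reading several fields
 * from the same user structure.  The names are hypothetical.
 *
 *      struct example_req { int cmd; int arg; };
 *
 *      static int example_fetch_req(const struct example_req __user *ureq,
 *                                   struct example_req *kreq)
 *      {
 *              if (!access_ok(VERIFY_READ, ureq, sizeof(*ureq)))
 *                      return -EFAULT;
 *              if (__get_user(kreq->cmd, &ureq->cmd) ||
 *                  __get_user(kreq->arg, &ureq->arg))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */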

/* Support macros for __put_user(). */

#define __put_user_asm(OP, x, ptr, ret)                                 \
        asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"              \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "0: { movei %0, %3; j 9f }\n"                      \
                     ".section __ex_table,\"a\"\n"                      \
                     _ASM_ALIGN "\n"                                    \
                     _ASM_PTR " 1b, 0b\n"                               \
                     ".popsection\n"                                    \
                     "9:"                                               \
                     : "=r" (ret)                                       \
                     : "r" (ptr), "r" (x), "i" (-EFAULT))

#ifdef __tilegx__
#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
#else
#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
#define __put_user_8(x, ptr, ret)                                       \
        ({                                                              \
                u64 __x = (__force __inttype(x))(x);                    \
                int __lo = (int) __x, __hi = (int) (__x >> 32);         \
                asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n"       \
                             "2: { sw %0, %3; movei %0, 0 }\n"          \
                             ".pushsection .fixup,\"ax\"\n"             \
                             "0: { movei %0, %4; j 9f }\n"              \
                             ".section __ex_table,\"a\"\n"              \
                             ".align 4\n"                               \
                             ".word 1b, 0b\n"                           \
                             ".word 2b, 0b\n"                           \
                             ".popsection\n"                            \
                             "9:"                                       \
                             : "=&r" (ret)                              \
                             : "r" (ptr), "r" (__lo32(__lo, __hi)),     \
                             "r" (__hi32(__lo, __hi)), "i" (-EFAULT));  \
        })
#endif

extern int __put_user_bad(void)
  __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)                                              \
({                                                                      \
        int __ret;                                                      \
        typeof(*(ptr)) _x = (x);                                        \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __put_user_1(_x, ptr, __ret); break;                    \
        case 2: __put_user_2(_x, ptr, __ret); break;                    \
        case 4: __put_user_4(_x, ptr, __ret); break;                    \
        case 8: __put_user_8(_x, ptr, __ret); break;                    \
        default: __ret = __put_user_bad(); break;                       \
        }                                                               \
        __ret;                                                          \
})
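
/*
 * Illustrative sketch (not part of the original header): __put_user()
 * paired with a prior access_ok() check on the destination.  The names
 * are hypothetical.
 *
 *      static int example_store_status(int __user *ustatus, int value)
 *      {
 *              if (!access_ok(VERIFY_WRITE, ustatus, sizeof(*ustatus)))
 *                      return -EFAULT;
 *              return __put_user(value, ustatus);
 *      }
 */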

/*
 * The versions of get_user and put_user without initial underscores
 * check the address of their arguments to make sure they are not
 * in kernel space.
 */
#define put_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *__Pu_addr = (ptr);                   \
        access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?    \
                __put_user((x), (__Pu_addr)) :                          \
                -EFAULT;                                                \
})

#define get_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) const __user *__Gu_addr = (ptr);             \
        access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?     \
                __get_user((x), (__Gu_addr)) :                          \
                ((x) = 0, -EFAULT);                                     \
})
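
/*
 * Illustrative sketch (not part of the original header): the checked
 * get_user()/put_user() forms are what a typical ioctl handler would
 * use on a pointer that came straight from user space.  The command
 * numbers and handler name are hypothetical.
 *
 *      static long example_ioctl(unsigned int cmd, int __user *argp)
 *      {
 *              int val;
 *
 *              switch (cmd) {
 *              case 0x1234:            // hypothetical "get" command
 *                      return put_user(42, argp);
 *              case 0x5678:            // hypothetical "set" command
 *                      if (get_user(val, argp))
 *                              return -EFAULT;
 *                      return val >= 0 ? 0 : -EINVAL;
 *              default:
 *                      return -ENOTTY;
 *              }
 *      }
 */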

extern unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

#ifdef __tilegx__
extern unsigned long raw_copy_in_user(
        void __user *to, const void __user *from, unsigned long n);
#endif

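/*
 * The raw_copy_*_user() routines above are the arch backends that
 * <linux/uaccess.h> wraps into the usual copy_from_user()/copy_to_user()
 * helpers (made inline here by INLINE_COPY_FROM_USER/INLINE_COPY_TO_USER).
 * Illustrative sketch with hypothetical names:
 *
 *      static int example_copy_in_blob(const void __user *ubuf, size_t len)
 *      {
 *              char kbuf[64];
 *
 *              if (len > sizeof(kbuf))
 *                      return -EINVAL;
 *              if (copy_from_user(kbuf, ubuf, len))
 *                      return -EFAULT; // returns the bytes NOT copied
 *              return 0;
 *      }
 */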

extern long strnlen_user(const char __user *str, long n);
extern long strlen_user(const char __user *str);
extern long strncpy_from_user(char *dst, const char __user *src, long);

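/*
 * Illustrative sketch (not part of the original header): copying a
 * NUL-terminated string in from user space.  strncpy_from_user()
 * returns the string length on success, a negative error on a bad
 * address, or the buffer size if the string was truncated.  Names are
 * hypothetical.
 *
 *      static int example_get_name(const char __user *uname,
 *                                  char *kname, long size)
 *      {
 *              long len = strncpy_from_user(kname, uname, size);
 *
 *              if (len < 0)
 *                      return len;             // e.g. -EFAULT
 *              if (len == size)
 *                      return -ENAMETOOLONG;   // no room for the NUL
 *              return 0;
 *      }
 */
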
/**
 * clear_user: - Zero a block of memory in user space.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __clear_user(
        void __user *mem, unsigned long len)
{
        might_fault();
        return clear_user_asm(mem, len);
}
static inline unsigned long __must_check clear_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __clear_user(mem, len);
        return len;
}

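/*
 * Illustrative sketch (not part of the original header): zeroing the
 * unfilled tail of a user buffer, e.g. after a short read.  Names are
 * hypothetical.
 *
 *      static int example_zero_tail(char __user *ubuf, size_t filled,
 *                                   size_t total)
 *      {
 *              if (filled < total &&
 *                  clear_user(ubuf + filled, total - filled))
 *                      return -EFAULT; // some bytes were not cleared
 *              return 0;
 *      }
 */
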
/**
 * flush_user: - Flush a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to flush.
 *
 * Returns number of bytes that could not be flushed.
 * On success, this will be zero.
 */
extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __flush_user(
        void __user *mem, unsigned long len)
{
        int retval;

        might_fault();
        retval = flush_user_asm(mem, len);
        mb_incoherent();
        return retval;
}

static inline unsigned long __must_check flush_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __flush_user(mem, len);
        return len;
}

/**
 * finv_user: - Flush-inval a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be flush-invalidated.
 * On success, this will be zero.
 */
extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __finv_user(
        void __user *mem, unsigned long len)
{
        int retval;

        might_fault();
        retval = finv_user_asm(mem, len);
        mb_incoherent();
        return retval;
}
static inline unsigned long __must_check finv_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __finv_user(mem, len);
        return len;
}

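/*
 * Illustrative sketch (not part of the original header): a driver that
 * hands a user buffer to a device that is not cache-coherent might
 * flush (or flush-and-invalidate) it first.  The function name is
 * hypothetical; only flush_user()/finv_user() come from this file.
 *
 *      static int example_prepare_dma(void __user *ubuf, unsigned long len,
 *                                     bool device_writes_back)
 *      {
 *              unsigned long left = device_writes_back ?
 *                      finv_user(ubuf, len) : flush_user(ubuf, len);
 *
 *              return left ? -EFAULT : 0;
 *      }
 */
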
#endif /* _ASM_TILE_UACCESS_H */