linux/arch/xtensa/include/asm/uaccess.h
/*
 * include/asm-xtensa/uaccess.h
 *
 * User space memory access functions
 *
 * These routines provide basic accessing functions to the user memory
 * space for the kernel.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H

#include <linux/prefetch.h>
#include <asm/types.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should
 * be performed or not.  If get_fs() == USER_DS, checking is
 * performed; with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are
 * grossly misnamed.
 */

#define KERNEL_DS       ((mm_segment_t) { 0 })
#define USER_DS         ((mm_segment_t) { 1 })

#define get_fs()        (current->thread.current_ds)
#define set_fs(val)     (current->thread.current_ds = (val))

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define __kernel_ok (uaccess_kernel())
#define __user_ok(addr, size) \
                (((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE - (size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(addr, size) __access_ok((unsigned long)(addr), (size))

#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)

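/*
 * Rough illustration (not part of this header; the helper name is made
 * up): a caller validates a user range once with access_ok() before
 * touching it, and treats failure as -EFAULT.
 *
 *      static int check_user_buf(const void __user *buf, size_t len)
 *      {
 *              if (!access_ok(buf, len))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */
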
/*
 * These are the main single-value transfer routines.  They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly. We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact. Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)        __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr)        __get_user_check((x), (ptr), sizeof(*(ptr)))

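/*
 * Minimal usage sketch (hypothetical helper, not defined in the kernel):
 * both macros verify the address themselves and return 0 on success or
 * -EFAULT on a bad user pointer.
 *
 *      static int bump_user_counter(int __user *p)
 *      {
 *              int val;
 *
 *              if (get_user(val, p))
 *                      return -EFAULT;
 *              return put_user(val + 1, p);
 *      }
 */
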
/*
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))


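/*
 * Sketch of the intended pattern (struct and function are illustrative
 * only): check the whole user range once, then do the individual
 * accesses with the unchecked variants.
 *
 *      struct pair { int a; int b; };
 *
 *      static int read_pair(struct pair __user *up, struct pair *kp)
 *      {
 *              if (!access_ok(up, sizeof(*up)))
 *                      return -EFAULT;
 *              if (__get_user(kp->a, &up->a))
 *                      return -EFAULT;
 *              return __get_user(kp->b, &up->b);
 *      }
 */
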
extern long __put_user_bad(void);

#define __put_user_nocheck(x, ptr, size)                \
({                                                      \
        long __pu_err;                                  \
        __put_user_size((x), (ptr), (size), __pu_err);  \
        __pu_err;                                       \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) *__pu_addr = (ptr);                          \
        if (access_ok(__pu_addr, size))                                 \
                __put_user_size((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                                       \
})

#define __put_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        int __cb;                                                       \
        retval = 0;                                                     \
        switch (size) {                                                 \
        case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb);  break; \
        case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break; \
        case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
        case 8: {                                                       \
                     __typeof__(*ptr) __v64 = x;                        \
                     retval = __copy_to_user(ptr, &__v64, 8);           \
                     break;                                             \
                }                                                       \
        default: __put_user_bad();                                      \
        }                                                               \
} while (0)


/*
 * Consider the case where a single user load/store would cause both an
 * unaligned exception and an MMU-related exception (unaligned
 * exceptions happen first):
 *
 * User code passes a bad variable ptr to a system call.
 * Kernel tries to access the variable.
 * Unaligned exception occurs.
 * Unaligned exception handler tries to make aligned accesses.
 * Double exception occurs for MMU-related cause (e.g., page not mapped).
 * do_page_fault() thinks the fault address belongs to the kernel, not the
 * user, and panics.
 *
 * The kernel currently prohibits user unaligned accesses.  We use the
 * __check_align_* macros to check for unaligned addresses before
 * accessing user space so we don't crash the kernel.  Both
 * __put_user_asm and __get_user_asm use these alignment macros, so
 * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
 * sync.
 */

#define __check_align_1  ""

#define __check_align_2                         \
        "   _bbci.l %3,  0, 1f          \n"     \
        "   movi    %0, %4              \n"     \
        "   _j      2f                  \n"

#define __check_align_4                         \
        "   _bbsi.l %3,  0, 0f          \n"     \
        "   _bbci.l %3,  1, 1f          \n"     \
        "0: movi    %0, %4              \n"     \
        "   _j      2f                  \n"


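/*
 * In C terms the checks above behave roughly like this hypothetical
 * helper (the real checks are emitted as assembly so the error path can
 * share the 1:/2: labels of __put_user_asm and __get_user_asm):
 *
 *      static int __check_align(unsigned long addr, int align)
 *      {
 *              if (addr & (align - 1))
 *                      return -EFAULT;
 *              return 0;
 *      }
 *
 * __check_align_2 tests bit 0 of the address, __check_align_4 tests
 * bits 0 and 1; on a misaligned address -EFAULT is loaded into the
 * error operand (%0) and the access is skipped.
 */
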
/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __put_user_asm(x, addr, err, align, insn, cb)   \
__asm__ __volatile__(                                   \
        __check_align_##align                           \
        "1: "insn"  %2, %3, 0           \n"             \
        "2:                             \n"             \
        "   .section  .fixup,\"ax\"     \n"             \
        "   .align 4                    \n"             \
        "   .literal_position           \n"             \
        "5:                             \n"             \
        "   movi   %1, 2b               \n"             \
        "   movi   %0, %4               \n"             \
        "   jx     %1                   \n"             \
        "   .previous                   \n"             \
        "   .section  __ex_table,\"a\"  \n"             \
        "   .long       1b, 5b          \n"             \
        "   .previous"                                  \
        :"=r" (err), "=r" (cb)                          \
        :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __gu_err, __gu_val;                                \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__force __typeof__(*(ptr)))__gu_val;             \
        __gu_err;                                               \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        long __gu_err = -EFAULT, __gu_val = 0;                          \
        const __typeof__(*(ptr)) *__gu_addr = (ptr);                    \
        if (access_ok(__gu_addr, size))                                 \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
})

extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        int __cb;                                                       \
        retval = 0;                                                     \
        switch (size) {                                                 \
        case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb);  break;\
        case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
        case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb);  break;\
        case 8: retval = __copy_from_user(&x, ptr, 8);    break;        \
        default: (x) = __get_user_bad();                                \
        }                                                               \
} while (0)


/*
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __get_user_asm(x, addr, err, align, insn, cb)   \
__asm__ __volatile__(                                   \
        __check_align_##align                           \
        "1: "insn"  %2, %3, 0           \n"             \
        "2:                             \n"             \
        "   .section  .fixup,\"ax\"     \n"             \
        "   .align 4                    \n"             \
        "   .literal_position           \n"             \
        "5:                             \n"             \
        "   movi   %1, 2b               \n"             \
        "   movi   %2, 0                \n"             \
        "   movi   %0, %4               \n"             \
        "   jx     %1                   \n"             \
        "   .previous                   \n"             \
        "   .section  __ex_table,\"a\"  \n"             \
        "   .long       1b, 5b          \n"             \
        "   .previous"                                  \
        :"=r" (err), "=r" (cb), "=r" (x)                \
        :"r" (addr), "i" (-EFAULT), "0" (err))


/*
 * Copy to/from user space
 */

extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        prefetchw(to);
        return __xtensa_copy_user(to, (__force const void *)from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        prefetch(from);
        return __xtensa_copy_user((__force void *)to, from, n);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

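/*
 * These two back the generic copy_from_user()/copy_to_user() helpers in
 * <linux/uaccess.h>.  Typical (hypothetical) caller; the return value is
 * the number of bytes that could not be copied, so zero means success:
 *
 *      struct my_args args;
 *
 *      if (copy_from_user(&args, user_ptr, sizeof(args)))
 *              return -EFAULT;
 */
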
/*
 * We need to return the number of bytes not cleared.  Our memset()
 * returns zero if a problem occurs while accessing user-space memory.
 * In that event, report that nothing was cleared by returning the full
 * size; otherwise return zero for success.
 */

static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
        if (!__memset(addr, 0, size))
                return size;
        return 0;
}

static inline unsigned long
clear_user(void *addr, unsigned long size)
{
        if (access_ok(addr, size))
                return __xtensa_clear_user(addr, size);
        return size ? -EFAULT : 0;
}

#define __clear_user  __xtensa_clear_user


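/*
 * Illustrative use (hypothetical caller): zero a user buffer and treat
 * any partially cleared range as a fault.
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;
 */
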
#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER

extern long __strncpy_user(char *, const char *, long);

static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
        if (access_ok(src, 1))
                return __strncpy_user(dst, src, count);
        return -EFAULT;
}
#else
long strncpy_from_user(char *dst, const char *src, long count);
#endif

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *, long);

static inline long strnlen_user(const char *str, long len)
{
        unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;

        if ((unsigned long)str > top)
                return 0;
        return __strnlen_user(str, len);
}

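/*
 * Usage sketch (hypothetical caller): the returned length counts the
 * terminating NUL, and 0 indicates that the string could not be read.
 *
 *      long n = strnlen_user(ustr, sizeof(kbuf));
 *
 *      if (n == 0)
 *              return -EFAULT;
 */
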
#endif  /* _XTENSA_UACCESS_H */