linux/arch/sparc/include/asm/uaccess_64.h
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { ASI_P })
#define USER_DS     ((mm_segment_t) { ASI_AIUS })       /* har har har */

#define VERIFY_READ     0
#define VERIFY_WRITE    1

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)

#define segment_eq(a, b)  ((a).seg == (b).seg)

#define set_fs(val)                                                             \
do {                                                                            \
        current_thread_info()->current_ds = (val).seg;                          \
        __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));        \
} while(0)
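
/*
 * Editor's sketch (not part of the original header): the classic pattern
 * for set_fs()/get_fs() is to temporarily widen the current segment so a
 * kernel buffer can be handed to code that expects a user pointer, then
 * restore the saved value.  The routine called here is hypothetical.
 *
 *      mm_segment_t old_fs = get_fs();
 *      int err;
 *
 *      set_fs(KERNEL_DS);
 *      err = do_transfer_expecting_user_ptr((void __user *)kernel_buf, len);
 *      set_fs(old_fs);
 */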

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        if (__builtin_constant_p(size))
                return addr > limit - size;

        addr += size;
        if (addr < size)
                return true;

        return addr > limit;
}
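
/*
 * Worked example (editor's note, not from the original source): on the
 * non-constant-size path above, addr = 0xffffffffffffff00 and size = 0x2000
 * make "addr += size" wrap around to 0x1f00; the "addr < size" test catches
 * that wrap, so the range is rejected even though the wrapped sum compares
 * below any sane limit.
 */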

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
        __chk_user_ptr(addr);                                           \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

static inline int __access_ok(const void __user * addr, unsigned long size)
{
        return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
        return 1;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned int insn, fixup;
};
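
/*
 * Editor's sketch (not part of the original header): the __put_user_asm()
 * and __get_user_asm() macros below each emit one such pair, associating
 * the faulting load/store at local label 1: with the fixup stub at 3:
 *
 *      .section __ex_table,"a"
 *      .word   1b, 3b
 *      .previous
 */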

void __ret_efault(void);
void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
        unsigned long __pu_addr = (unsigned long)(ptr); \
        __chk_user_ptr(ptr); \
        __put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
        unsigned long __gu_addr = (unsigned long)(ptr); \
        __chk_user_ptr(ptr); \
        __get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
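
/*
 * Editor's sketch (not part of the original header): get_user()/put_user()
 * evaluate to 0 on success and -EFAULT when the access faults, with the
 * value transferred through the first argument.  The function below is
 * hypothetical.
 *
 *      static int double_user_int(int __user *uptr)
 *      {
 *              int val;
 *
 *              if (get_user(val, uptr))
 *                      return -EFAULT;
 *              return put_user(val * 2, uptr);
 *      }
 */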

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({                 \
        register int __pu_ret;                                  \
        switch (size) {                                         \
        case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
        case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
        case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
        case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
        default: __pu_ret = __put_user_bad(); break;            \
        }                                                       \
        __pu_ret;                                               \
})

#define __put_user_asm(x, size, addr, ret)                              \
__asm__ __volatile__(                                                   \
                "/* Put user asm, inline. */\n"                         \
        "1:\t"  "st"#size "a %1, [%2] %%asi\n\t"                        \
                "clr    %0\n"                                           \
        "2:\n\n\t"                                                      \
                ".section .fixup,#alloc,#execinstr\n\t"                 \
                ".align 4\n"                                            \
        "3:\n\t"                                                        \
                "sethi  %%hi(2b), %0\n\t"                               \
                "jmpl   %0 + %%lo(2b), %%g0\n\t"                        \
                " mov   %3, %0\n\n\t"                                   \
                ".previous\n\t"                                         \
                ".section __ex_table,\"a\"\n\t"                         \
                ".align 4\n\t"                                          \
                ".word  1b, 3b\n\t"                                     \
                ".previous\n\n\t"                                       \
               : "=r" (ret) : "r" (x), "r" (__m(addr)),                 \
                 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({                        \
        register int __gu_ret;                                               \
        register unsigned long __gu_val;                                     \
        switch (size) {                                                      \
                case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
                case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
                case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
                case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;  \
                default:                                                     \
                        __gu_val = 0;                                        \
                        __gu_ret = __get_user_bad();                         \
                        break;                                               \
        }                                                                    \
        data = (__force type) __gu_val;                                      \
        __gu_ret;                                                            \
})

#define __get_user_nocheck_ret(data, addr, size, type, retval) ({       \
        register unsigned long __gu_val __asm__ ("l1");                 \
        switch (size) {                                                 \
        case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break;  \
        case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break;  \
        case 4: __get_user_asm_ret(__gu_val, uw, addr, retval); break;  \
        case 8: __get_user_asm_ret(__gu_val, x, addr, retval); break;   \
        default:                                                        \
                if (__get_user_bad())                                   \
                        return retval;                                  \
        }                                                               \
        data = (__force type) __gu_val;                                 \
})

#define __get_user_asm(x, size, addr, ret)                              \
__asm__ __volatile__(                                                   \
                "/* Get user asm, inline. */\n"                         \
        "1:\t"  "ld"#size "a [%2] %%asi, %1\n\t"                        \
                "clr    %0\n"                                           \
        "2:\n\n\t"                                                      \
                ".section .fixup,#alloc,#execinstr\n\t"                 \
                ".align 4\n"                                            \
        "3:\n\t"                                                        \
                "sethi  %%hi(2b), %0\n\t"                               \
                "clr    %1\n\t"                                         \
                "jmpl   %0 + %%lo(2b), %%g0\n\t"                        \
                " mov   %3, %0\n\n\t"                                   \
                ".previous\n\t"                                         \
                ".section __ex_table,\"a\"\n\t"                         \
                ".align 4\n\t"                                          \
                ".word  1b, 3b\n\n\t"                                   \
                ".previous\n\t"                                         \
               : "=r" (ret), "=r" (x) : "r" (__m(addr)),                \
                 "i" (-EFAULT))

#define __get_user_asm_ret(x, size, addr, retval)                       \
if (__builtin_constant_p(retval) && retval == -EFAULT)                  \
        __asm__ __volatile__(                                           \
                "/* Get user asm ret, inline. */\n"                     \
        "1:\t"  "ld"#size "a [%1] %%asi, %0\n\n\t"                      \
                ".section __ex_table,\"a\"\n\t"                         \
                ".align 4\n\t"                                          \
                ".word  1b,__ret_efault\n\n\t"                          \
                ".previous\n\t"                                         \
               : "=r" (x) : "r" (__m(addr)));                           \
else                                                                    \
        __asm__ __volatile__(                                           \
                "/* Get user asm ret, inline. */\n"                     \
        "1:\t"  "ld"#size "a [%1] %%asi, %0\n\n\t"                      \
                ".section .fixup,#alloc,#execinstr\n\t"                 \
                ".align 4\n"                                            \
        "3:\n\t"                                                        \
                "ret\n\t"                                               \
                " restore %%g0, %2, %%o0\n\n\t"                         \
                ".previous\n\t"                                         \
                ".section __ex_table,\"a\"\n\t"                         \
                ".align 4\n\t"                                          \
                ".word  1b, 3b\n\n\t"                                   \
                ".previous\n\t"                                         \
               : "=r" (x) : "r" (__m(addr)), "i" (retval))

int __get_user_bad(void);

unsigned long __must_check ___copy_from_user(void *to,
                                             const void __user *from,
                                             unsigned long size);
unsigned long copy_from_user_fixup(void *to, const void __user *from,
                                   unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
        unsigned long ret = ___copy_from_user(to, from, size);

        if (unlikely(ret))
                ret = copy_from_user_fixup(to, from, size);

        return ret;
}
#define __copy_from_user copy_from_user
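
/*
 * Editor's sketch (not part of the original header): copy_from_user()
 * returns the number of bytes it could NOT copy, so any nonzero result is
 * normally turned into -EFAULT by the caller.  The struct and pointer
 * names are hypothetical.
 *
 *      struct demo_req req;
 *
 *      if (copy_from_user(&req, user_ptr, sizeof(req)))
 *              return -EFAULT;
 */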

unsigned long __must_check ___copy_to_user(void __user *to,
                                           const void *from,
                                           unsigned long size);
unsigned long copy_to_user_fixup(void __user *to, const void *from,
                                 unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
        unsigned long ret = ___copy_to_user(to, from, size);

        if (unlikely(ret))
                ret = copy_to_user_fixup(to, from, size);
        return ret;
}
#define __copy_to_user copy_to_user

unsigned long __must_check ___copy_in_user(void __user *to,
                                           const void __user *from,
                                           unsigned long size);
unsigned long copy_in_user_fixup(void __user *to, void __user *from,
                                 unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
        unsigned long ret = ___copy_in_user(to, from, size);

        if (unlikely(ret))
                ret = copy_in_user_fixup(to, from, size);
        return ret;
}
#define __copy_in_user copy_in_user

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
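
/*
 * Editor's sketch (not part of the original header): strnlen_user()
 * returns the string length including the terminating NUL, a value larger
 * than the limit if no NUL was found within it, or 0 on a faulting access,
 * so callers typically check it before copying.  The buffer below is
 * hypothetical.
 *
 *      long len = strnlen_user(ustr, sizeof(kbuf));
 *
 *      if (len == 0 || len > sizeof(kbuf))
 *              return -EFAULT;
 */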

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
                                        unsigned int insn,
                                        unsigned int rd);

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */