linux/arch/sparc/include/asm/uaccess_64.h
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#endif

#ifndef __ASSEMBLY__

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { ASI_P })
#define USER_DS     ((mm_segment_t) { ASI_AIUS })       /* har har har */

#define VERIFY_READ     0
#define VERIFY_WRITE    1

#define get_fs() ((mm_segment_t) { get_thread_current_ds() })
#define get_ds() (KERNEL_DS)

#define segment_eq(a,b)  ((a).seg == (b).seg)

#define set_fs(val)                                                             \
do {                                                                            \
        set_thread_current_ds((val).seg);                                       \
        __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));        \
} while(0)
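
/* Illustrative sketch of the usual save/override/restore pattern built
 * on get_fs()/set_fs(), as seen elsewhere in the kernel when a kernel
 * buffer must be passed to a routine that expects a user pointer.  The
 * variable names are examples only, not part of this header:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... call a user-access routine on a kernel buffer ...
 *	set_fs(old_fs);
 */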

static inline int __access_ok(const void __user * addr, unsigned long size)
{
        return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
        return 1;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned int insn, fixup;
};
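
/* Illustrative sketch (not part of this header) of how such entries are
 * consumed: on a fault, the trap handler looks up the trapping PC in
 * __ex_table and, if an entry matches, resumes execution at the fixup
 * address instead of killing the task.  In generic-kernel terms,
 * roughly (resume_at() is a placeholder, not a real function):
 *
 *	const struct exception_table_entry *e;
 *
 *	e = search_exception_tables(faulting_pc);
 *	if (e)
 *		resume_at(e->fixup);
 */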

extern void __ret_efault(void);
extern void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x,ptr) ({ \
unsigned long __pu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })

#define get_user(x,ptr) ({ \
unsigned long __gu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })

#define __put_user(x,ptr) put_user(x,ptr)
#define __get_user(x,ptr) get_user(x,ptr)
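
/* Example usage (illustrative only; "uptr" and "val" are sample names).
 * Both macros expand to an expression that evaluates to 0 on success
 * and -EFAULT if the access faults, with get_user() also assigning the
 * fetched value to its first argument:
 *
 *	int __user *uptr = ...;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */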

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data,addr,size) ({ \
register int __pu_ret; \
switch (size) { \
case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} __pu_ret; })

#define __put_user_asm(x,size,addr,ret)                                 \
__asm__ __volatile__(                                                   \
        "/* Put user asm, inline. */\n"                                 \
"1:\t"  "st"#size "a %1, [%2] %%asi\n\t"                                \
        "clr    %0\n"                                                   \
"2:\n\n\t"                                                              \
        ".section .fixup,#alloc,#execinstr\n\t"                         \
        ".align 4\n"                                                    \
"3:\n\t"                                                                \
        "sethi  %%hi(2b), %0\n\t"                                       \
        "jmpl   %0 + %%lo(2b), %%g0\n\t"                                \
        " mov   %3, %0\n\n\t"                                           \
        ".previous\n\t"                                                 \
        ".section __ex_table,\"a\"\n\t"                                 \
        ".align 4\n\t"                                                  \
        ".word  1b, 3b\n\t"                                             \
        ".previous\n\n\t"                                               \
       : "=r" (ret) : "r" (x), "r" (__m(addr)),                         \
         "i" (-EFAULT))

extern int __put_user_bad(void);

#define __get_user_nocheck(data,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} data = (type) __gu_val; __gu_ret; })

#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} data = (type) __gu_val; })

#define __get_user_asm(x,size,addr,ret)                                 \
__asm__ __volatile__(                                                   \
        "/* Get user asm, inline. */\n"                                 \
"1:\t"  "ld"#size "a [%2] %%asi, %1\n\t"                                \
        "clr    %0\n"                                                   \
"2:\n\n\t"                                                              \
        ".section .fixup,#alloc,#execinstr\n\t"                         \
        ".align 4\n"                                                    \
"3:\n\t"                                                                \
        "sethi  %%hi(2b), %0\n\t"                                       \
        "clr    %1\n\t"                                                 \
        "jmpl   %0 + %%lo(2b), %%g0\n\t"                                \
        " mov   %3, %0\n\n\t"                                           \
        ".previous\n\t"                                                 \
        ".section __ex_table,\"a\"\n\t"                                 \
        ".align 4\n\t"                                                  \
        ".word  1b, 3b\n\n\t"                                           \
        ".previous\n\t"                                                 \
       : "=r" (ret), "=r" (x) : "r" (__m(addr)),                        \
         "i" (-EFAULT))

#define __get_user_asm_ret(x,size,addr,retval)                          \
if (__builtin_constant_p(retval) && retval == -EFAULT)                  \
__asm__ __volatile__(                                                   \
        "/* Get user asm ret, inline. */\n"                             \
"1:\t"  "ld"#size "a [%1] %%asi, %0\n\n\t"                              \
        ".section __ex_table,\"a\"\n\t"                                 \
        ".align 4\n\t"                                                  \
        ".word  1b,__ret_efault\n\n\t"                                  \
        ".previous\n\t"                                                 \
       : "=r" (x) : "r" (__m(addr)));                                   \
else                                                                    \
__asm__ __volatile__(                                                   \
        "/* Get user asm ret, inline. */\n"                             \
"1:\t"  "ld"#size "a [%1] %%asi, %0\n\n\t"                              \
        ".section .fixup,#alloc,#execinstr\n\t"                         \
        ".align 4\n"                                                    \
"3:\n\t"                                                                \
        "ret\n\t"                                                       \
        " restore %%g0, %2, %%o0\n\n\t"                                 \
        ".previous\n\t"                                                 \
        ".section __ex_table,\"a\"\n\t"                                 \
        ".align 4\n\t"                                                  \
        ".word  1b, 3b\n\n\t"                                           \
        ".previous\n\t"                                                 \
       : "=r" (x) : "r" (__m(addr)), "i" (retval))

extern int __get_user_bad(void);

extern unsigned long __must_check ___copy_from_user(void *to,
                                                    const void __user *from,
                                                    unsigned long size);
extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
                                          unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
        unsigned long ret = ___copy_from_user(to, from, size);

        if (unlikely(ret))
                ret = copy_from_user_fixup(to, from, size);

        return ret;
}
#define __copy_from_user copy_from_user
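
/* Example usage (illustrative; "kbuf" and "ubuf" are sample names).
 * copy_from_user() returns the number of bytes that could not be
 * copied, so a nonzero result means a partial or failed copy:
 *
 *	char kbuf[64];
 *
 *	if (copy_from_user(kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */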

extern unsigned long __must_check ___copy_to_user(void __user *to,
                                                  const void *from,
                                                  unsigned long size);
extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
                                        unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
        unsigned long ret = ___copy_to_user(to, from, size);

        if (unlikely(ret))
                ret = copy_to_user_fixup(to, from, size);
        return ret;
}
#define __copy_to_user copy_to_user

extern unsigned long __must_check ___copy_in_user(void __user *to,
                                                  const void __user *from,
                                                  unsigned long size);
extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
                                        unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
        unsigned long ret = ___copy_in_user(to, from, size);

        if (unlikely(ret))
                ret = copy_in_user_fixup(to, from, size);
        return ret;
}
#define __copy_in_user copy_in_user

extern unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

extern long __must_check __strncpy_from_user(char *dest, const char __user *src, long count);

#define strncpy_from_user __strncpy_from_user
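
/* Example usage (illustrative; "name" and "uname" are sample names).
 * On success strncpy_from_user() returns the length of the copied
 * string (excluding the trailing NUL); on a fault it returns -EFAULT:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 */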

extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);

#define strlen_user __strlen_user
#define strnlen_user __strnlen_user
#define __copy_to_user_inatomic ___copy_to_user
#define __copy_from_user_inatomic ___copy_from_user

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */