linux/include/asm-sparc/uaccess.h
/* $Id: uaccess.h,v 1.24 2001/10/30 04:32:24 davem Exp $
 * uaccess.h: User space memory access functions.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/vac-ops.h>
#include <asm/a.out.h>
#endif

#ifndef __ASSEMBLY__

/* Sparc is not segmented, however we need to be able to fool access_ok()
 * when doing system calls from kernel mode legitimately.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { 0 })
#define USER_DS     ((mm_segment_t) { -1 })

#define VERIFY_READ     0
#define VERIFY_WRITE    1

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.current_ds)
#define set_fs(val)     ((current->thread.current_ds) = (val))

#define segment_eq(a,b) ((a).seg == (b).seg)
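
/* Usage sketch (illustrative, not part of this header): the classic
 * pattern for legitimately fooling access_ok() from kernel mode, as the
 * comment above describes, is to widen the segment around a helper call:
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	... call code that uses the user-access routines on kernel pointers ...
 *	set_fs(old_fs);
 */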

/* There is a conveniently unmapped page at PAGE_OFFSET - PAGE_SIZE, so this
 * test can be fairly lightweight.
 * No one can read or write kernel space from userland by passing a large
 * size and an address close to PAGE_OFFSET - the access will fault first.
 */
#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
#define access_ok(type, addr, size)                                     \
        ({ (void)(type); __access_ok((unsigned long)(addr), size); })
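
/* Example (illustrative): a read()/write() style entry point would
 * normally validate the whole user buffer up front:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */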

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 *
 * There is a special way to mark a range of potentially faulting insns
 * (like twenty ldd/std's with no intervening other instructions): you
 * specify the address of the first insn and 0 in fixup, and in the next
 * exception_table_entry you specify the last potentially faulting insn + 1
 * and in fixup the routine which should handle the fault.
 * That fixup code will get
 * (faulting_insn_address - first_insn_in_the_range_address)/4
 * in %g2 (ie. the index of the faulting instruction in the range).
 */
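
/* Range sketch (illustrative; the label names are made up): following the
 * convention above, a block of consecutive loads needs two table entries,
 * the first with fixup == 0 and the second pointing past the block:
 *
 *	.section __ex_table,#alloc
 *	.align	4
 *	.word	first_insn, 0
 *	.word	last_insn_plus_4, fixup_routine
 *	.previous
 */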

struct exception_table_entry
{
        unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_extables_range(unsigned long addr, unsigned long *g2);

extern void __ret_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x,ptr) ({ \
unsigned long __pu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })

#define get_user(x,ptr) ({ \
unsigned long __gu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
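
/* Usage sketch (illustrative): the "two values" are the fetched datum and
 * an error code; both macros evaluate to 0 on success and -EFAULT on a
 * bad pointer:
 *
 *	int val;
 *	if (get_user(val, (int __user *) uaddr))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int __user *) uaddr))
 *		return -EFAULT;
 */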

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr)))
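
/* Sketch (illustrative): one access_ok() check amortized over several
 * unchecked accesses to the same area:
 *
 *	if (!access_ok(VERIFY_READ, p, 2 * sizeof(int)))
 *		return -EFAULT;
 *	err  = __get_user(a, &p[0]);
 *	err |= __get_user(b, &p[1]);
 */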
 108
 109struct __large_struct { unsigned long buf[100]; };
 110#define __m(x) ((struct __large_struct __user *)(x))
 111
 112#define __put_user_check(x,addr,size) ({ \
 113register int __pu_ret; \
 114if (__access_ok(addr,size)) { \
 115switch (size) { \
 116case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
 117case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
 118case 4: __put_user_asm(x,,addr,__pu_ret); break; \
 119case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
 120default: __pu_ret = __put_user_bad(); break; \
 121} } else { __pu_ret = -EFAULT; } __pu_ret; })
 122
 123#define __put_user_nocheck(x,addr,size) ({ \
 124register int __pu_ret; \
 125switch (size) { \
 126case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
 127case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
 128case 4: __put_user_asm(x,,addr,__pu_ret); break; \
 129case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
 130default: __pu_ret = __put_user_bad(); break; \
 131} __pu_ret; })
 132
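
/* In __put_user_asm below, label 1: marks the store that may fault.  On a
 * fault, the __ex_table entry sends control to 3: in .fixup, which
 * branches back to 2: with -EFAULT placed in the error register via the
 * branch delay slot.
 */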
#define __put_user_asm(x,size,addr,ret)                                 \
__asm__ __volatile__(                                                   \
        "/* Put user asm, inline. */\n"                                 \
"1:\t"  "st"#size " %1, %2\n\t"                                         \
        "clr    %0\n"                                                   \
"2:\n\n\t"                                                              \
        ".section .fixup,#alloc,#execinstr\n\t"                         \
        ".align 4\n"                                                    \
"3:\n\t"                                                                \
        "b      2b\n\t"                                                 \
        " mov   %3, %0\n\t"                                             \
        ".previous\n\n\t"                                               \
        ".section __ex_table,#alloc\n\t"                                \
        ".align 4\n\t"                                                  \
        ".word  1b, 3b\n\t"                                             \
        ".previous\n\n\t"                                               \
       : "=&r" (ret) : "r" (x), "m" (*__m(addr)),                       \
         "i" (-EFAULT))

extern int __put_user_bad(void);

#define __get_user_check(x,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; })

#define __get_user_check_ret(x,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} x = (type) __gu_val; } else return retval; })

#define __get_user_nocheck(x,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} x = (type) __gu_val; __gu_ret; })

#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} x = (type) __gu_val; })

#define __get_user_asm(x,size,addr,ret)                                 \
__asm__ __volatile__(                                                   \
        "/* Get user asm, inline. */\n"                                 \
"1:\t"  "ld"#size " %2, %1\n\t"                                         \
        "clr    %0\n"                                                   \
"2:\n\n\t"                                                              \
        ".section .fixup,#alloc,#execinstr\n\t"                         \
        ".align 4\n"                                                    \
"3:\n\t"                                                                \
        "clr    %1\n\t"                                                 \
        "b      2b\n\t"                                                 \
        " mov   %3, %0\n\n\t"                                           \
        ".previous\n\t"                                                 \
        ".section __ex_table,#alloc\n\t"                                \
        ".align 4\n\t"                                                  \
        ".word  1b, 3b\n\n\t"                                           \
        ".previous\n\t"                                                 \
       : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),                     \
         "i" (-EFAULT))
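
/* __get_user_asm_ret below has two flavours: when retval is the constant
 * -EFAULT, the exception table entry can point straight at the shared
 * __ret_efault stub and no local fixup is emitted; otherwise the fixup
 * returns from the current function with retval in %o0.
 */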
#define __get_user_asm_ret(x,size,addr,retval)                          \
if (__builtin_constant_p(retval) && retval == -EFAULT)                  \
__asm__ __volatile__(                                                   \
        "/* Get user asm ret, inline. */\n"                             \
"1:\t"  "ld"#size " %1, %0\n\n\t"                                       \
        ".section __ex_table,#alloc\n\t"                                \
        ".align 4\n\t"                                                  \
        ".word  1b,__ret_efault\n\n\t"                                  \
        ".previous\n\t"                                                 \
       : "=&r" (x) : "m" (*__m(addr)));                                 \
else                                                                    \
__asm__ __volatile__(                                                   \
        "/* Get user asm ret, inline. */\n"                             \
"1:\t"  "ld"#size " %1, %0\n\n\t"                                       \
        ".section .fixup,#alloc,#execinstr\n\t"                         \
        ".align 4\n"                                                    \
"3:\n\t"                                                                \
        "ret\n\t"                                                       \
        " restore %%g0, %2, %%o0\n\n\t"                                 \
        ".previous\n\t"                                                 \
        ".section __ex_table,#alloc\n\t"                                \
        ".align 4\n\t"                                                  \
        ".word  1b, 3b\n\n\t"                                           \
        ".previous\n\t"                                                 \
       : "=&r" (x) : "m" (*__m(addr)), "i" (retval))

extern int __get_user_bad(void);

extern unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);

static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (n && __access_ok((unsigned long) to, n))
                return __copy_user(to, (__force void __user *) from, n);
        else
                return n;
}

static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
        return __copy_user(to, (__force void __user *) from, n);
}

static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (n && __access_ok((unsigned long) from, n))
                return __copy_user((__force void __user *) to, from, n);
        else
                return n;
}

static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
        return __copy_user((__force void __user *) to, from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
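
/* Usage sketch (illustrative): the copy routines return the number of
 * bytes that could not be copied, so any nonzero return is normally
 * treated as a fault:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */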

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
        unsigned long ret;

        __asm__ __volatile__ (
                ".section __ex_table,#alloc\n\t"
                ".align 4\n\t"
                ".word 1f,3\n\t"
                ".previous\n\t"
                "mov %2, %%o1\n"
                "1:\n\t"
                "call __bzero\n\t"
                " mov %1, %%o0\n\t"
                "mov %%o0, %0\n"
                : "=r" (ret) : "r" (addr), "r" (size) :
                "o0", "o1", "o2", "o3", "o4", "o5", "o7",
                "g1", "g2", "g3", "g4", "g5", "g7", "cc");

        return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long n)
{
        if (n && __access_ok((unsigned long) addr, n))
                return __clear_user(addr, n);
        else
                return n;
}
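
/* Sketch (illustrative): clear_user() follows the same convention,
 * returning the number of bytes left unzeroed:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */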

extern long __strncpy_from_user(char *dest, const char __user *src, long count);

static inline long strncpy_from_user(char *dest, const char __user *src, long count)
{
        if (__access_ok((unsigned long) src, count))
                return __strncpy_from_user(dest, src, count);
        else
                return -EFAULT;
}
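
/* Sketch (illustrative, assuming the standard strncpy_from_user
 * semantics): the return value is the length copied, excluding the
 * trailing NUL, or -EFAULT on a bad source pointer:
 *
 *	long len = strncpy_from_user(buf, ustr, sizeof(buf));
 *	if (len < 0)
 *		return len;
 */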

extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);

static inline long strlen_user(const char __user *str)
{
        if (!access_ok(VERIFY_READ, str, 0))
                return 0;
        else
                return __strlen_user(str);
}

static inline long strnlen_user(const char __user *str, long len)
{
        if (!access_ok(VERIFY_READ, str, 0))
                return 0;
        else
                return __strnlen_user(str, len);
}

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */