linux/arch/parisc/include/asm/uaccess.h
#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>

#include <linux/bug.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define KERNEL_DS       ((mm_segment_t){0})
#define USER_DS         ((mm_segment_t){1})

#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))
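
/*
 * Illustrative sketch (not part of the original header): the classic
 * pattern for temporarily letting the uaccess helpers operate on kernel
 * pointers under this addr_limit scheme:
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      ...uaccess on kernel pointers...
 *      set_fs(old_fs);
 */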

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

static inline long access_ok(int type, const void __user * addr,
                unsigned long size)
{
        return 1;
}

#define put_user __put_user
#define get_user __get_user
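
/*
 * Illustrative sketch (not part of the original header): a typical
 * caller, e.g. in an ioctl handler, looks like
 *
 *      int val;
 *
 *      if (get_user(val, (int __user *)arg))
 *              return -EFAULT;
 *
 * Since access_ok() always succeeds here, a bad user pointer is only
 * caught by the page fault handler, via the exception table below.
 */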

#if !defined(CONFIG_64BIT)
#define LDD_USER(ptr)           __get_user_asm64(ptr)
#define STD_USER(x, ptr)        __put_user_asm64(x, ptr)
#else
#define LDD_USER(ptr)           __get_user_asm("ldd", ptr)
#define STD_USER(x, ptr)        __put_user_asm("std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32-bit values are sufficient even on a 64-bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
        int insn;       /* relative address of insn that is allowed to fault. */
        int fixup;      /* relative address of fixup routine */
};
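
/*
 * Decoding sketch (this happens in the fault-handling code, not in this
 * header): each relative field is added to its own address to recover
 * the absolute address, e.g.
 *
 *      insn_addr  = (unsigned long)&entry->insn  + entry->insn;
 *      fixup_addr = (unsigned long)&entry->fixup + entry->fixup;
 */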

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
        ".section __ex_table,\"aw\"\n"                     \
        ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
        ".previous\n"

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
        unsigned long fault_ip;
        unsigned long fault_gp;
        unsigned long fault_space;
        unsigned long fault_addr;
};

/*
 * load_sr2() preloads the space register %sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
 * is 0), or with the current value of %sr3 to access user space (USER_DS)
 * memory. The following __get_user_asm() and __put_user_asm() macros have
 * %sr2 hard-coded to access the requested memory.
 */
#define load_sr2() \
        __asm__(" or,=  %0,%%r0,%%r0\n\t"       \
                " mfsp %%sr3,%0\n\t"            \
                " mtsp %0,%%sr2\n\t"            \
                : : "r"(get_fs()) : )
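
/*
 * Explanatory note (assuming standard PA-RISC nullification semantics):
 * the ",=" completer on "or" nullifies the next instruction when the
 * result is zero.  For KERNEL_DS (seg == 0) the mfsp is skipped and %sr2
 * is set to 0; for USER_DS (seg == 1) the mfsp executes and %sr2 gets a
 * copy of %sr3, the user space id.
 */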

#define __get_user(x, ptr)                               \
({                                                       \
        register long __gu_err __asm__ ("r8") = 0;       \
        register long __gu_val __asm__ ("r9") = 0;       \
                                                         \
        load_sr2();                                      \
        switch (sizeof(*(ptr))) {                        \
            case 1: __get_user_asm("ldb", ptr); break;   \
            case 2: __get_user_asm("ldh", ptr); break;   \
            case 4: __get_user_asm("ldw", ptr); break;   \
            case 8: LDD_USER(ptr);  break;               \
            default: BUILD_BUG(); break;                 \
        }                                                \
                                                         \
        (x) = (__force __typeof__(*(ptr))) __gu_val;     \
        __gu_err;                                        \
})
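
/*
 * Note (explanatory): the sizeof() switch is resolved at compile time,
 * so e.g. __get_user(c, (char __user *)p) reduces to a single "ldb",
 * and an unsupported size fails the build via BUILD_BUG().  Error and
 * value live in the fixed registers r8/r9 that the fixup routines
 * expect (see the clobber comment before __put_user_asm() below).
 */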

#define __get_user_asm(ldx, ptr)                        \
        __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t"     \
                ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "r"(ptr), "1"(__gu_err)               \
                : "r1");

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(ptr)                           \
        __asm__("\n1:\tldw 0(%%sr2,%2),%0"              \
                "\n2:\tldw 4(%%sr2,%2),%R0\n\t"         \
                ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
                ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "r"(ptr), "1"(__gu_err)               \
                : "r1");

#endif /* !defined(CONFIG_64BIT) */


#define __put_user(x, ptr)                                      \
({                                                              \
        register long __pu_err __asm__ ("r8") = 0;              \
        __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);       \
                                                                \
        load_sr2();                                             \
        switch (sizeof(*(ptr))) {                               \
            case 1: __put_user_asm("stb", __x, ptr); break;     \
            case 2: __put_user_asm("sth", __x, ptr); break;     \
            case 4: __put_user_asm("stw", __x, ptr); break;     \
            case 8: STD_USER(__x, ptr); break;                  \
            default: BUILD_BUG(); break;                        \
        }                                                       \
                                                                \
        __pu_err;                                               \
})

/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that "fixup_put_user_skip_[12]" are executed in the
 * context of the fault, and any registers used there must be listed
 * as clobbers. In this case only "r1" is used by the current routines.
 * r8/r9 are already listed as err/val.
 */

#define __put_user_asm(stx, x, ptr)                         \
        __asm__ __volatile__ (                              \
                "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t"         \
                ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
                : "=r"(__pu_err)                            \
                : "r"(ptr), "r"(x), "0"(__pu_err)           \
                : "r1")


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {                   \
        __asm__ __volatile__ (                              \
                "\n1:\tstw %2,0(%%sr2,%1)"                  \
                "\n2:\tstw %R2,4(%%sr2,%1)\n\t"             \
                ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
                ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
                : "=r"(__pu_err)                            \
                : "r"(ptr), "r"(__val), "0"(__pu_err)       \
                : "r1");                                    \
} while (0)

#endif /* !defined(CONFIG_64BIT) */
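
/*
 * Note (explanatory, based on gcc's PA operand modifiers): on a 32-bit
 * kernel a 64-bit value lives in a register pair, and "%R2" names the
 * second register of the pair for operand 2, so the two "stw"s above
 * store the two words of the value at offsets 0 and 4.
 */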


/*
 * Complex access routines -- external declarations
 */

extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
                                          unsigned long len);
unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
                                          unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src,
                           unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        int sz = __compiletime_object_size(to);
        unsigned long ret = n;

        if (likely(sz < 0 || sz >= n)) {
                check_object_size(to, n, false);
                ret = __copy_from_user(to, from, n);
        } else if (!__builtin_constant_p(n))
                copy_user_overflow(sz, n);
        else
                __bad_copy_user();

        if (unlikely(ret))
                memset(to + (n - ret), 0, ret);

        return ret;
}
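
/*
 * Illustrative sketch (not part of the original source): a short copy
 * never leaks stale kernel memory to the caller:
 *
 *      char buf[64];
 *
 *      if (copy_from_user(buf, uptr, sizeof(buf)))
 *              return -EFAULT;
 *
 * If __copy_from_user() faults part way through, the memset() above
 * zeroes the tail of buf that was never written.
 */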

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        int sz = __compiletime_object_size(from);

        if (likely(sz < 0 || sz >= n)) {
                check_object_size(from, n, true);
                n = __copy_to_user(to, from, n);
        } else if (!__builtin_constant_p(n))
                copy_user_overflow(sz, n);
        else
                __bad_copy_user();

        return n;
}
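
/*
 * Note (explanatory): unlike copy_from_user(), no zeroing is needed on
 * failure here -- the destination is user memory, and the nonzero return
 * value already tells the caller how many bytes were left uncopied.
 */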

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */