linux/arch/sparc/include/asm/uaccess_64.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#include <linux/compiler.h>
#include <linux/string.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm/extable_64.h>

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erm, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { ASI_P })
#define USER_DS     ((mm_segment_t) { ASI_AIUS })       /* har har har */

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)

#define segment_eq(a, b)  ((a).seg == (b).seg)

#define set_fs(val)                                                             \
do {                                                                            \
        current_thread_info()->current_ds = (val).seg;                          \
        __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));        \
} while(0)

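/*
 * Illustrative sketch (not part of the original header): the classic
 * pattern for temporarily letting the user-access routines operate on
 * kernel addresses used these macros roughly like
 *
 *        mm_segment_t old_fs = get_fs();
 *
 *        set_fs(KERNEL_DS);
 *        ... get_user()/put_user()/copy_*_user() on kernel pointers ...
 *        set_fs(old_fs);
 *
 * On sparc64, set_fs() simply rewrites the %asi register used by the
 * alternate-space loads and stores below, switching it between ASI_P
 * (kernel) and ASI_AIUS (user).
 */
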
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        if (__builtin_constant_p(size))
                return addr > limit - size;

        addr += size;
        if (addr < size)
                return true;

        return addr > limit;
}

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
        __chk_user_ptr(addr);                                           \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
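
/*
 * Worked example (added note, not in the original source): for a
 * non-constant size, __chk_range_not_ok() folds the size into the
 * address first; if "addr + size" wraps around (addr < size after the
 * addition), the block cannot be valid, e.g. addr = ~0UL - 4 with
 * size = 16 wraps and is rejected no matter what the limit is.
 * Otherwise the end of the block is simply compared against the limit.
 */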

static inline int __access_ok(const void __user * addr, unsigned long size)
{
        return 1;
}

static inline int access_ok(const void __user * addr, unsigned long size)
{
        return 1;
}

void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
        unsigned long __pu_addr = (unsigned long)(ptr); \
        __chk_user_ptr(ptr); \
        __put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
        unsigned long __gu_addr = (unsigned long)(ptr); \
        __chk_user_ptr(ptr); \
        __get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
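
/*
 * Typical usage, shown here only as an illustrative sketch ("uptr" and
 * "val" are hypothetical names, not part of this header):
 *
 *        int val;
 *
 *        if (get_user(val, uptr))
 *                return -EFAULT;
 *        if (put_user(val + 1, uptr))
 *                return -EFAULT;
 *
 * The macros return 0 on success and -EFAULT on a faulting user access,
 * with get_user() also storing the fetched value through its first
 * argument.
 */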

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({                 \
        register int __pu_ret;                                  \
        switch (size) {                                         \
        case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
        case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
        case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
        case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
        default: __pu_ret = __put_user_bad(); break;            \
        }                                                       \
        __pu_ret;                                               \
})

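/*
 * Note on the inline assembly below (descriptive comment, not in the
 * original source): the store/load at label 1 goes through %asi, so it
 * hits user memory when %asi is ASI_AIUS.  The __ex_table entry pairs
 * label 1 with the fixup code at label 3; if the access faults, the trap
 * handler branches to 3, which loads -EFAULT into the error register
 * (and, for __get_user_asm, clears the destination) before jumping back
 * to label 2, the instruction following the access.
 */
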
#define __put_user_asm(x, size, addr, ret)                              \
__asm__ __volatile__(                                                   \
                "/* Put user asm, inline. */\n"                         \
        "1:\t"  "st"#size "a %1, [%2] %%asi\n\t"                        \
                "clr    %0\n"                                           \
        "2:\n\n\t"                                                      \
                ".section .fixup,#alloc,#execinstr\n\t"                 \
                ".align 4\n"                                            \
        "3:\n\t"                                                        \
                "sethi  %%hi(2b), %0\n\t"                               \
                "jmpl   %0 + %%lo(2b), %%g0\n\t"                        \
                " mov   %3, %0\n\n\t"                                   \
                ".previous\n\t"                                         \
                ".section __ex_table,\"a\"\n\t"                         \
                ".align 4\n\t"                                          \
                ".word  1b, 3b\n\t"                                     \
                ".previous\n\n\t"                                       \
               : "=r" (ret) : "r" (x), "r" (__m(addr)),                 \
                 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({                        \
        register int __gu_ret;                                               \
        register unsigned long __gu_val;                                     \
        switch (size) {                                                      \
                case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
                case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
                case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
                case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;  \
                default:                                                     \
                        __gu_val = 0;                                        \
                        __gu_ret = __get_user_bad();                         \
                        break;                                               \
        }                                                                    \
        data = (__force type) __gu_val;                                      \
        __gu_ret;                                                            \
})

#define __get_user_asm(x, size, addr, ret)                              \
__asm__ __volatile__(                                                   \
                "/* Get user asm, inline. */\n"                         \
        "1:\t"  "ld"#size "a [%2] %%asi, %1\n\t"                        \
                "clr    %0\n"                                           \
        "2:\n\n\t"                                                      \
                ".section .fixup,#alloc,#execinstr\n\t"                 \
                ".align 4\n"                                            \
        "3:\n\t"                                                        \
                "sethi  %%hi(2b), %0\n\t"                               \
                "clr    %1\n\t"                                         \
                "jmpl   %0 + %%lo(2b), %%g0\n\t"                        \
                " mov   %3, %0\n\n\t"                                   \
                ".previous\n\t"                                         \
                ".section __ex_table,\"a\"\n\t"                         \
                ".align 4\n\t"                                          \
                ".word  1b, 3b\n\n\t"                                   \
                ".previous\n\t"                                         \
               : "=r" (ret), "=r" (x) : "r" (__m(addr)),                \
                 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check raw_copy_from_user(void *to,
                                             const void __user *from,
                                             unsigned long size);

unsigned long __must_check raw_copy_to_user(void __user *to,
                                           const void *from,
                                           unsigned long size);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

unsigned long __must_check raw_copy_in_user(void __user *to,
                                           const void __user *from,
                                           unsigned long size);
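
/*
 * Illustrative sketch (not part of this header): the generic
 * copy_from_user()/copy_to_user() wrappers are built on the raw_copy_*()
 * routines above and are used like
 *
 *        struct foo karg;
 *
 *        if (copy_from_user(&karg, uarg, sizeof(karg)))
 *                return -EFAULT;
 *
 * where "struct foo", "karg" and "uarg" are hypothetical names.  A nonzero
 * return value is the number of bytes that could not be copied.
 */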

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strnlen_user(const char __user *str, long n);

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
                                        unsigned int insn,
                                        unsigned int rd);

#endif /* _ASM_UACCESS_H */