linux/arch/sparc/include/asm/uaccess_64.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/errno.h>	/* for EFAULT, used by the inline asm below */
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm/extable_64.h>

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erm, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { ASI_P })
#define USER_DS     ((mm_segment_t) { ASI_AIUS })       /* har har har */

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})

#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)

#define set_fs(val)                                                             \
do {                                                                            \
        current_thread_info()->current_ds = (val).seg;                          \
        __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));        \
} while(0)
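
/*
 * A minimal sketch of the classic pattern these macros enabled: switch
 * to KERNEL_DS so the uaccess routines accept kernel pointers (on
 * sparc64, loads and stores then go through ASI_P instead of
 * ASI_AIUS), do the access, then restore the old segment.
 * Illustrative only; the helper name is hypothetical:
 *
 *	mm_segment_t old_fs = get_fs();
 *	int err;
 *
 *	set_fs(KERNEL_DS);
 *	err = helper_that_uses_uaccess(kbuf);
 *	set_fs(old_fs);
 */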

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        if (__builtin_constant_p(size))
                return addr > limit - size;

        addr += size;
        if (addr < size)
                return true;

        return addr > limit;
}

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
        __chk_user_ptr(addr);                                           \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
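
/*
 * The "addr += size; if (addr < size)" test above is an unsigned
 * overflow guard: if addr + size wraps past the top of the 64-bit
 * address space, the sum comes out smaller than size and the range is
 * rejected.  Worked example (values are illustrative):
 *
 *	addr = 0xffffffffffffff00, size = 0x200
 *	addr + size wraps to 0x100; 0x100 < 0x200, so return true (bad)
 *
 * Without that check, the final "addr > limit" comparison would see
 * the small wrapped value and wrongly accept the range.
 */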

static inline int __access_ok(const void __user * addr, unsigned long size)
{
        return 1;
}

static inline int access_ok(const void __user * addr, unsigned long size)
{
        return 1;
}
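
/*
 * access_ok() can succeed unconditionally here because, as the comment
 * at the top of this file explains, user accesses go through the
 * secondary ASI and a separate VM map: a bad pointer simply faults at
 * access time.  Portable callers still write the usual pattern
 * (hypothetical snippet):
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */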

void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
        unsigned long __pu_addr = (unsigned long)(ptr); \
        __chk_user_ptr(ptr); \
        __put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
        unsigned long __gu_addr = (unsigned long)(ptr); \
        __chk_user_ptr(ptr); \
        __get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
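
/*
 * Usage sketch: both macros evaluate to 0 on success and -EFAULT on a
 * faulting access, with the transfer size picked from the pointer
 * type.  Hypothetical ioctl-style handler (FLAG_ENABLED is made up):
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)arg))	// 4-byte load via %asi
 *		return -EFAULT;
 *	val |= FLAG_ENABLED;
 *	if (put_user(val, (u32 __user *)arg))	// 4-byte store via %asi
 *		return -EFAULT;
 *	return 0;
 */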

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({                 \
        register int __pu_ret;                                  \
        switch (size) {                                         \
        case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
        case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
        case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
        case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
        default: __pu_ret = __put_user_bad(); break;            \
        }                                                       \
        __pu_ret;                                               \
})

#define __put_user_asm(x, size, addr, ret)                              \
__asm__ __volatile__(                                                   \
                "/* Put user asm, inline. */\n"                         \
        "1:\t"  "st"#size "a %1, [%2] %%asi\n\t"                        \
                "clr    %0\n"                                           \
        "2:\n\n\t"                                                      \
                ".section .fixup,#alloc,#execinstr\n\t"                 \
                ".align 4\n"                                            \
        "3:\n\t"                                                        \
                "sethi  %%hi(2b), %0\n\t"                               \
                "jmpl   %0 + %%lo(2b), %%g0\n\t"                        \
                " mov   %3, %0\n\n\t"                                   \
                ".previous\n\t"                                         \
                ".section __ex_table,\"a\"\n\t"                         \
                ".align 4\n\t"                                          \
                ".word  1b, 3b\n\t"                                     \
                ".previous\n\n\t"                                       \
               : "=r" (ret) : "r" (x), "r" (__m(addr)),                 \
                 "i" (-EFAULT))

int __put_user_bad(void);
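
/*
 * How the fixup above works: label 1 is the store through %asi that
 * may fault.  The __ex_table entry ".word 1b, 3b" tells the fault
 * handler to resume a fault at 1b at the fixup code at 3b, which jumps
 * back to 2b (the end of the fast path) while loading -EFAULT into the
 * return register from the jmpl delay slot ("mov %3, %0").  On the
 * fast path, "clr %0" makes the macro return 0.  __get_user_asm()
 * below uses the same scheme.
 */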

#define __get_user_nocheck(data, addr, size, type) ({                        \
        register int __gu_ret;                                               \
        register unsigned long __gu_val;                                     \
        switch (size) {                                                      \
                case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
                case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
                case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
                case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;  \
                default:                                                     \
                        __gu_val = 0;                                        \
                        __gu_ret = __get_user_bad();                         \
                        break;                                               \
        }                                                                    \
        data = (__force type) __gu_val;                                      \
        __gu_ret;                                                            \
})

#define __get_user_asm(x, size, addr, ret)                              \
__asm__ __volatile__(                                                   \
                "/* Get user asm, inline. */\n"                         \
        "1:\t"  "ld"#size "a [%2] %%asi, %1\n\t"                        \
                "clr    %0\n"                                           \
        "2:\n\n\t"                                                      \
                ".section .fixup,#alloc,#execinstr\n\t"                 \
                ".align 4\n"                                            \
        "3:\n\t"                                                        \
                "sethi  %%hi(2b), %0\n\t"                               \
                "clr    %1\n\t"                                         \
                "jmpl   %0 + %%lo(2b), %%g0\n\t"                        \
                " mov   %3, %0\n\n\t"                                   \
                ".previous\n\t"                                         \
                ".section __ex_table,\"a\"\n\t"                         \
                ".align 4\n\t"                                          \
                ".word  1b, 3b\n\n\t"                                   \
                ".previous\n\t"                                         \
               : "=r" (ret), "=r" (x) : "r" (__m(addr)),                \
                 "i" (-EFAULT))

int __get_user_bad(void);
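
/*
 * Note the extra "clr %1" in the get-side fixup: when the load faults,
 * the destination register is zeroed before returning -EFAULT, so a
 * failed get_user() hands back 0 rather than leaking whatever value
 * happened to be in the register.
 */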

unsigned long __must_check raw_copy_from_user(void *to,
                                             const void __user *from,
                                             unsigned long size);

unsigned long __must_check raw_copy_to_user(void __user *to,
                                           const void *from,
                                           unsigned long size);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

unsigned long __must_check raw_copy_in_user(void __user *to,
                                           const void __user *from,
                                           unsigned long size);
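
/*
 * These are the arch backends for copy_from_user()/copy_to_user();
 * defining INLINE_COPY_{FROM,TO}_USER above asks the generic
 * <linux/uaccess.h> to wrap them as inline functions, and
 * raw_copy_in_user() copies directly between two user buffers.  All of
 * them return the number of bytes NOT copied, 0 on success.
 * Hypothetical caller:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;	// some tail of len was left uncopied
 */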

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strnlen_user(const char __user *str, long n);
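
/*
 * strnlen_user() follows the usual kernel contract: it returns the
 * string length including the terminating NUL, 0 if a fault occurs,
 * or a value larger than n if no NUL is found within the limit.
 * Hypothetical caller sizing a kernel copy of a user path:
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (len == 0)
 *		return -EFAULT;
 *	if (len > PATH_MAX)
 *		return -ENAMETOOLONG;
 */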

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
                                        unsigned int insn,
                                        unsigned int rd);

#endif /* _ASM_UACCESS_H */