linux/arch/sparc/include/asm/uaccess_64.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#include <linux/compiler.h>
#include <linux/string.h>
#include <asm/asi.h>
#include <asm/spitfire.h>

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { ASI_P })
#define USER_DS     ((mm_segment_t) { ASI_AIUS })       /* har har har */

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})

#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)

#define set_fs(val)                                                             \
do {                                                                            \
        current_thread_info()->current_ds = (val).seg;                          \
        __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));        \
} while(0)
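
/*
 * Illustrative sketch (not part of the original header): the classic
 * caller pattern these macros exist for is to save, widen and restore
 * the "segment" so that the normal user accessors temporarily accept
 * kernel addresses.  On sparc64 this just switches which ASI the
 * accessors go through:
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      ... user accessors now operate on kernel pointers ...
 *      set_fs(old_fs);
 */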

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        if (__builtin_constant_p(size))
                return addr > limit - size;

        addr += size;
        if (addr < size)
                return true;

        return addr > limit;
}

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
        __chk_user_ptr(addr);                                           \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
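
/*
 * Illustrative note (added for clarity, not in the original source):
 * the non-constant path above guards against wraparound.  For example,
 * with addr of ~0UL and size of 2, "addr + size" wraps to 1, which is
 * less than size, so the range is rejected even though the wrapped sum
 * would pass the limit check on its own.
 */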

/*
 * Because user memory is reached through the secondary ASI in a
 * separate address space, no user-supplied range can alias kernel
 * memory, so these checks always succeed.
 */
static inline int __access_ok(const void __user * addr, unsigned long size)
{
        return 1;
}

static inline int access_ok(const void __user * addr, unsigned long size)
{
        return 1;
}

void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
        unsigned long __pu_addr = (unsigned long)(ptr); \
        __chk_user_ptr(ptr); \
        __put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
        unsigned long __gu_addr = (unsigned long)(ptr); \
        __chk_user_ptr(ptr); \
        __get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
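
/*
 * Illustrative sketch (not part of the original header): a typical
 * caller transfers one value and checks the 0 / -EFAULT result:
 *
 *      int __user *uptr;       ... assumed to hold a user pointer ...
 *      int val;
 *
 *      if (get_user(val, uptr))
 *              return -EFAULT;
 *      val++;
 *      if (put_user(val, uptr))
 *              return -EFAULT;
 */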

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({                 \
        register int __pu_ret;                                  \
        switch (size) {                                         \
        case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
        case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
        case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
        case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
        default: __pu_ret = __put_user_bad(); break;            \
        }                                                       \
        __pu_ret;                                               \
})

#define __put_user_asm(x, size, addr, ret)                              \
__asm__ __volatile__(                                                   \
                "/* Put user asm, inline. */\n"                         \
        "1:\t"  "st"#size "a %1, [%2] %%asi\n\t"                        \
                "clr    %0\n"                                           \
        "2:\n\n\t"                                                      \
                ".section .fixup,#alloc,#execinstr\n\t"                 \
                ".align 4\n"                                            \
        "3:\n\t"                                                        \
                "sethi  %%hi(2b), %0\n\t"                               \
                "jmpl   %0 + %%lo(2b), %%g0\n\t"                        \
                " mov   %3, %0\n\n\t"                                   \
                ".previous\n\t"                                         \
                ".section __ex_table,\"a\"\n\t"                         \
                ".align 4\n\t"                                          \
                ".word  1b, 3b\n\t"                                     \
                ".previous\n\n\t"                                       \
               : "=r" (ret) : "r" (x), "r" (__m(addr)),                 \
                 "i" (-EFAULT))
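
/*
 * Descriptive note (added for clarity, not in the original source): the
 * store at label 1 is covered by an __ex_table entry pointing at the
 * fixup code at label 3.  On a fault, the fixup jumps back to label 2
 * with -EFAULT loaded into the result register in the delay slot, so a
 * successful store yields 0 and a faulting one yields -EFAULT.
 */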

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({                        \
        register int __gu_ret;                                               \
        register unsigned long __gu_val;                                     \
        switch (size) {                                                      \
                case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
                case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
                case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
                case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;  \
                default:                                                     \
                        __gu_val = 0;                                        \
                        __gu_ret = __get_user_bad();                         \
                        break;                                               \
        }                                                                    \
        data = (__force type) __gu_val;                                      \
        __gu_ret;                                                            \
})

#define __get_user_asm(x, size, addr, ret)                              \
__asm__ __volatile__(                                                   \
                "/* Get user asm, inline. */\n"                         \
        "1:\t"  "ld"#size "a [%2] %%asi, %1\n\t"                        \
                "clr    %0\n"                                           \
        "2:\n\n\t"                                                      \
                ".section .fixup,#alloc,#execinstr\n\t"                 \
                ".align 4\n"                                            \
        "3:\n\t"                                                        \
                "sethi  %%hi(2b), %0\n\t"                               \
                "clr    %1\n\t"                                         \
                "jmpl   %0 + %%lo(2b), %%g0\n\t"                        \
                " mov   %3, %0\n\n\t"                                   \
                ".previous\n\t"                                         \
                ".section __ex_table,\"a\"\n\t"                         \
                ".align 4\n\t"                                          \
                ".word  1b, 3b\n\n\t"                                   \
                ".previous\n\t"                                         \
               : "=r" (ret), "=r" (x) : "r" (__m(addr)),                \
                 "i" (-EFAULT))
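
/*
 * Descriptive note (added for clarity, not in the original source): the
 * get-side fixup additionally clears the destination register (%1), so
 * a faulting get_user() hands back 0 in the value as well as -EFAULT in
 * the return code.
 */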

int __get_user_bad(void);

unsigned long __must_check raw_copy_from_user(void *to,
                                             const void __user *from,
                                             unsigned long size);

unsigned long __must_check raw_copy_to_user(void __user *to,
                                           const void *from,
                                           unsigned long size);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
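
/*
 * Illustrative sketch (not part of the original header): bulk transfers
 * go through the generic copy_{from,to}_user() wrappers, which end up
 * in the raw_copy_*() routines declared above and return the number of
 * bytes left uncopied:
 *
 *      struct foo kbuf;        ... hypothetical structure ...
 *
 *      if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *              return -EFAULT;
 */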

unsigned long __must_check raw_copy_in_user(void __user *to,
                                           const void __user *from,
                                           unsigned long size);

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user
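
/*
 * Illustrative sketch (not part of the original header): clear_user()
 * zeroes a user buffer and, like the copy routines, returns the number
 * of bytes that could not be cleared:
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;
 */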

__must_check long strnlen_user(const char __user *str, long n);

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
                                        unsigned int insn,
                                        unsigned int rd);

#endif /* _ASM_UACCESS_H */