linux/arch/sparc/include/asm/uaccess_32.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * uaccess.h: User space memory access functions.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/compiler.h>
#include <linux/string.h>

#include <asm/processor.h>

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

/* Sparc is not segmented; however, we need to be able to fool access_ok()
 * when legitimately doing system calls from kernel mode.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { 0 })
#define USER_DS     ((mm_segment_t) { -1 })

#define get_fs()        (current->thread.current_ds)
#define set_fs(val)     ((current->thread.current_ds) = (val))

#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
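
/* Illustration only (not part of this header): the "fooling" mentioned
 * above is the classic save/widen/restore pattern, where a kernel-side
 * buffer is temporarily allowed through access_ok().  The helper name
 * and buffer below are hypothetical.
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      ret = some_uaccess_helper((void __user *)kernel_buf, len);
 *      set_fs(old_fs);
 */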

/* There is a conveniently unmapped page at PAGE_OFFSET - PAGE_SIZE, so this
 * test can be fairly lightweight.
 * No one can read or write kernel space from userland by passing a large
 * size and an address just below PAGE_OFFSET - the resulting fault defeats
 * the attempt.
 */
#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
#define __kernel_ok (uaccess_kernel())
#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
#define access_ok(addr, size) __access_ok((unsigned long)(addr), size)
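
/* Worked note (illustrative): under USER_DS the segment mask is all ones,
 * so "(addr) & get_fs().seg" leaves the address intact and __user_ok()
 * simply compares it against STACK_TOP; under KERNEL_DS the mask is 0,
 * the masked address becomes 0, and the check always passes.  The size is
 * deliberately ignored - an access that starts below STACK_TOP but runs
 * past it hits the unmapped page described above and faults instead.
 *
 *      access_ok(uptr, len);   // USER_DS: passes only if uptr < STACK_TOP
 *      access_ok(kptr, len);   // KERNEL_DS: always passes
 */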

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 *
 * There is also a special way to mark a range of potentially faulting
 * insns (like twenty ldd/std's with no other instructions in between):
 * specify the address of the first insn and 0 in the fixup, and in the
 * next exception_table_entry specify the last potentially faulting
 * insn + 1 and in its fixup the routine which should handle the fault.
 * That fixup code will get
 * (faulting_insn_address - first_insn_in_the_range_address)/4
 * in %g2 (ie. the index of the faulting instruction in the range).
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

/* Returns 0 if the exception is not found, and the fixup otherwise.  */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
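
/* Sketch of the range convention described above (hypothetical asm, not
 * taken from this file): the block of back-to-back ldd's is bracketed by
 * one entry with fixup 0 and one covering "last insn + 1" with the real
 * fixup; "range_fixup" is a made-up label, and on a fault it receives the
 * index of the faulting insn in %g2.
 *
 *      1:      ldd     [%o1 + 0x00], %o2
 *              ldd     [%o1 + 0x08], %o4
 *              !       ... more ldd/std, nothing else in between
 *      2:      ! first insn past the range
 *
 *              .section __ex_table,#alloc
 *              .align  4
 *              .word   1b, 0
 *              .word   2b, range_fixup
 *              .previous
 */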

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
        unsigned long __pu_addr = (unsigned long)(ptr); \
        __chk_user_ptr(ptr); \
        __put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
})

#define get_user(x, ptr) ({ \
        unsigned long __gu_addr = (unsigned long)(ptr); \
        __chk_user_ptr(ptr); \
        __get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
})
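
/* Usage sketch (hypothetical caller, not part of this header): the macros
 * above pick the transfer width from the pointer type and return 0 on
 * success or -EFAULT on a fault or a failed access_ok() check.
 *
 *      static int example_bump(int __user *uptr)
 *      {
 *              int val;
 *
 *              if (get_user(val, uptr))
 *                      return -EFAULT;
 *              if (put_user(val + 1, uptr))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */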

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))
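
/* Usage sketch (hypothetical caller): when the same user area is touched
 * several times, one explicit access_ok() up front lets the cheaper
 * unchecked variants do the individual transfers.
 *
 *      static int example_pair(unsigned int __user *uptr,
 *                              unsigned int a, unsigned int b)
 *      {
 *              if (!access_ok(uptr, 2 * sizeof(unsigned int)))
 *                      return -EFAULT;
 *              if (__put_user(a, uptr) || __put_user(b, uptr + 1))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */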

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct __user *)(x))

#define __put_user_check(x, addr, size) ({ \
        register int __pu_ret; \
        if (__access_ok(addr, size)) { \
                switch (size) { \
                case 1: \
                        __put_user_asm(x, b, addr, __pu_ret); \
                        break; \
                case 2: \
                        __put_user_asm(x, h, addr, __pu_ret); \
                        break; \
                case 4: \
                        __put_user_asm(x, , addr, __pu_ret); \
                        break; \
                case 8: \
                        __put_user_asm(x, d, addr, __pu_ret); \
                        break; \
                default: \
                        __pu_ret = __put_user_bad(); \
                        break; \
                } \
        } else { \
                __pu_ret = -EFAULT; \
        } \
        __pu_ret; \
})

#define __put_user_nocheck(x, addr, size) ({                    \
        register int __pu_ret;                                  \
        switch (size) {                                         \
        case 1: __put_user_asm(x, b, addr, __pu_ret); break;    \
        case 2: __put_user_asm(x, h, addr, __pu_ret); break;    \
        case 4: __put_user_asm(x, , addr, __pu_ret); break;     \
        case 8: __put_user_asm(x, d, addr, __pu_ret); break;    \
        default: __pu_ret = __put_user_bad(); break;            \
        } \
        __pu_ret; \
})

#define __put_user_asm(x, size, addr, ret)                              \
__asm__ __volatile__(                                                   \
                "/* Put user asm, inline. */\n"                         \
        "1:\t"  "st"#size " %1, %2\n\t"                                 \
                "clr    %0\n"                                           \
        "2:\n\n\t"                                                      \
                ".section .fixup,#alloc,#execinstr\n\t"                 \
                ".align 4\n"                                            \
        "3:\n\t"                                                        \
                "b      2b\n\t"                                         \
                " mov   %3, %0\n\t"                                     \
                ".previous\n\n\t"                                       \
                ".section __ex_table,#alloc\n\t"                        \
                ".align 4\n\t"                                          \
                ".word  1b, 3b\n\t"                                     \
                ".previous\n\n\t"                                       \
               : "=&r" (ret) : "r" (x), "m" (*__m(addr)),               \
                 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_check(x, addr, size, type) ({ \
        register int __gu_ret; \
        register unsigned long __gu_val; \
        if (__access_ok(addr, size)) { \
                switch (size) { \
                case 1: \
                        __get_user_asm(__gu_val, ub, addr, __gu_ret); \
                        break; \
                case 2: \
                        __get_user_asm(__gu_val, uh, addr, __gu_ret); \
                        break; \
                case 4: \
                        __get_user_asm(__gu_val, , addr, __gu_ret); \
                        break; \
                case 8: \
                        __get_user_asm(__gu_val, d, addr, __gu_ret); \
                        break; \
                default: \
                        __gu_val = 0; \
                        __gu_ret = __get_user_bad(); \
                        break; \
                } \
        } else { \
                __gu_val = 0; \
                __gu_ret = -EFAULT; \
        } \
        x = (__force type) __gu_val; \
        __gu_ret; \
})

#define __get_user_nocheck(x, addr, size, type) ({                      \
        register int __gu_ret;                                          \
        register unsigned long __gu_val;                                \
        switch (size) {                                                 \
        case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;    \
        case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;    \
        case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break;      \
        case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break;     \
        default:                                                        \
                __gu_val = 0;                                           \
                __gu_ret = __get_user_bad();                            \
                break;                                                  \
        }                                                               \
        x = (__force type) __gu_val;                                    \
        __gu_ret;                                                       \
})

#define __get_user_asm(x, size, addr, ret)                              \
__asm__ __volatile__(                                                   \
                "/* Get user asm, inline. */\n"                         \
        "1:\t"  "ld"#size " %2, %1\n\t"                                 \
                "clr    %0\n"                                           \
        "2:\n\n\t"                                                      \
                ".section .fixup,#alloc,#execinstr\n\t"                 \
                ".align 4\n"                                            \
        "3:\n\t"                                                        \
                "clr    %1\n\t"                                         \
                "b      2b\n\t"                                         \
                " mov   %3, %0\n\n\t"                                   \
                ".previous\n\t"                                         \
                ".section __ex_table,#alloc\n\t"                        \
                ".align 4\n\t"                                          \
                ".word  1b, 3b\n\n\t"                                   \
                ".previous\n\t"                                         \
               : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),             \
                 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);

static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        return __copy_user(to, (__force void __user *) from, n);
}

static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        return __copy_user((__force void __user *) to, from, n);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
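
/* Semantics note with a hypothetical caller: like the generic
 * copy_{to,from}_user(), the raw routines above return the number of
 * bytes that could not be copied; 0 means everything was transferred.
 *
 *      struct example_req req;
 *
 *      if (copy_from_user(&req, uarg, sizeof(req)))
 *              return -EFAULT;
 */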

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
        unsigned long ret;

        __asm__ __volatile__ (
                ".section __ex_table,#alloc\n\t"
                ".align 4\n\t"
                ".word 1f,3\n\t"
                ".previous\n\t"
                "mov %2, %%o1\n"
                "1:\n\t"
                "call __bzero\n\t"
                " mov %1, %%o0\n\t"
                "mov %%o0, %0\n"
                : "=r" (ret) : "r" (addr), "r" (size) :
                "o0", "o1", "o2", "o3", "o4", "o5", "o7",
                "g1", "g2", "g3", "g4", "g5", "g7", "cc");

        return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long n)
{
        if (n && __access_ok((unsigned long) addr, n))
                return __clear_user(addr, n);
        else
                return n;
}
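
/* Usage sketch (hypothetical caller): clear_user() likewise returns the
 * number of bytes left unzeroed, so any non-zero result is treated as a
 * fault.
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;
 */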

__must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASM_UACCESS_H */