linux/arch/powerpc/include/asm/uaccess.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS         MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_fs()        (current->thread.addr_limit)

static inline void set_fs(mm_segment_t fs)
{
        current->thread.addr_limit = fs;
        /* On user-mode return, check that addr_limit (fs) is correct */
        set_thread_flag(TIF_FSCHECK);
}
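
/*
 * Example (illustrative, not part of this header): the classic pattern
 * for temporarily lifting the address limit so routines that expect
 * __user pointers can operate on a kernel buffer:
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      ... access a kernel buffer through the user-access routines ...
 *      set_fs(old_fs);
 */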

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define user_addr_max() (get_fs().seg)

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough gap
 * between user addresses and the kernel addresses: even with both
 * addr and size at the limit, addr + size is at most twice the user
 * address limit, which is still far below the lowest kernel address.
 */
#define __access_ok(addr, size, segment)        \
        (((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

static inline int __access_ok(unsigned long addr, unsigned long size,
                        mm_segment_t seg)
{
        if (addr > seg.seg)
                return 0;
        return (size == 0 || size - 1 <= seg.seg - addr);
}

#endif

#define access_ok(addr, size)           \
        (__chk_user_ptr(addr),          \
         __access_ok((__force unsigned long)(addr), (size), get_fs()))
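
/*
 * Example (illustrative, not part of this header): validate a user
 * range once, then use the unchecked __get_user()/__put_user() forms
 * defined below:
 *
 *      if (!access_ok(ubuf, len))
 *              return -EFAULT;
 */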

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space; that check must have been done previously with a
 * separate access_ok() call (this is used when we do multiple accesses
 * to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * PowerPC, we can just do these as direct assignments.  (Of course,
 * the exception handling means that it's no longer "just"...)
 */
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_inatomic(x, ptr) \
        __get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
        __put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
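
/*
 * Example (illustrative, not part of this header): the checked forms
 * return 0 on success and -EFAULT on a bad user address:
 *
 *      u32 val;
 *
 *      if (get_user(val, uptr))
 *              return -EFAULT;
 *      if (put_user(val + 1, uptr))
 *              return -EFAULT;
 */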

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
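/*
 * A fault at label 1 is routed by the EX_TABLE entry to the fixup at
 * label 3, which sets err to -EFAULT and branches back to label 2 to
 * resume execution.
 */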
#define __put_user_asm(x, addr, err, op)                        \
        __asm__ __volatile__(                                   \
                "1:     " op " %1,0(%2) # put_user\n"           \
                "2:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "3:     li %0,%3\n"                             \
                "       b 2b\n"                                 \
                ".previous\n"                                   \
                EX_TABLE(1b, 3b)                                \
                : "=r" (err)                                    \
                : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)                         \
        __put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)                           \
        __asm__ __volatile__(                                   \
                "1:     stw %1,0(%2)\n"                         \
                "2:     stw %1+1,4(%2)\n"                       \
                "3:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "4:     li %0,%3\n"                             \
                "       b 3b\n"                                 \
                ".previous\n"                                   \
                EX_TABLE(1b, 4b)                                \
                EX_TABLE(2b, 4b)                                \
                : "=r" (err)                                    \
                : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

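/*
 * allow_write_to_user()/prevent_write_to_user() (from asm/kup.h) open
 * and close the user-access window (KUAP) around the actual store;
 * they are no-ops when KUAP is not active.
 */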
#define __put_user_size(x, ptr, size, retval)                   \
do {                                                            \
        retval = 0;                                             \
        allow_write_to_user(ptr, size);                         \
        switch (size) {                                         \
        case 1: __put_user_asm(x, ptr, retval, "stb"); break;   \
        case 2: __put_user_asm(x, ptr, retval, "sth"); break;   \
        case 4: __put_user_asm(x, ptr, retval, "stw"); break;   \
        case 8: __put_user_asm2(x, ptr, retval); break;         \
        default: __put_user_bad();                              \
        }                                                       \
        prevent_write_to_user(ptr, size);                       \
} while (0)

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __pu_err;                                          \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
        if (!is_kernel_addr((unsigned long)__pu_addr))          \
                might_fault();                                  \
        __chk_user_ptr(ptr);                                    \
        __put_user_size((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                               \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        might_fault();                                                  \
        if (access_ok(__pu_addr, size))                                 \
                __put_user_size((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                                       \
})

#define __put_user_nosleep(x, ptr, size)                        \
({                                                              \
        long __pu_err;                                          \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
        __chk_user_ptr(ptr);                                    \
        __put_user_size((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                               \
})


extern long __get_user_bad(void);

/*
 * This does an atomic 128-bit (16-byte) aligned load from userspace.
 * It is up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)                \
        __asm__ __volatile__(                           \
                "1:     lvx  0,0,%1     # get user\n"   \
                "       stvx 0,0,%2     # put kernel\n" \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     li %0,%3\n"                     \
                "       b 2b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 3b)                        \
                : "=r" (err)                            \
                : "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

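/*
 * On a faulting load, the fixup below zeroes the destination register
 * as well as setting err, so callers never see uninitialized data.
 */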
#define __get_user_asm(x, addr, err, op)                \
        __asm__ __volatile__(                           \
                "1:     "op" %1,0(%2)   # get_user\n"   \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     li %0,%3\n"                     \
                "       li %1,0\n"                      \
                "       b 2b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 3b)                        \
                : "=r" (err), "=r" (x)                  \
                : "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)                   \
        __get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)                   \
        __asm__ __volatile__(                           \
                "1:     lwz %1,0(%2)\n"                 \
                "2:     lwz %1+1,4(%2)\n"               \
                "3:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "4:     li %0,%3\n"                     \
                "       li %1,0\n"                      \
                "       li %1+1,0\n"                    \
                "       b 3b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 4b)                        \
                EX_TABLE(2b, 4b)                        \
                : "=r" (err), "=&r" (x)                 \
                : "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)                   \
do {                                                            \
        retval = 0;                                             \
        __chk_user_ptr(ptr);                                    \
        if (size > sizeof(x))                                   \
                (x) = __get_user_bad();                         \
        allow_read_from_user(ptr, size);                        \
        switch (size) {                                         \
        case 1: __get_user_asm(x, ptr, retval, "lbz"); break;   \
        case 2: __get_user_asm(x, ptr, retval, "lhz"); break;   \
        case 4: __get_user_asm(x, ptr, retval, "lwz"); break;   \
        case 8: __get_user_asm2(x, ptr, retval); break;         \
        default: (x) = __get_user_bad();                        \
        }                                                       \
        prevent_read_from_user(ptr, size);                      \
} while (0)

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
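
/*
 * Example (illustrative): on 32-bit, __long_type(*(u64 __user *)p) is
 * unsigned long long, so an 8-byte get_user() gets a 64-bit temporary
 * and __get_user_asm2() can fill both halves of the register pair.
 */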

#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __gu_err;                                          \
        __long_type(*(ptr)) __gu_val;                           \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
        __chk_user_ptr(ptr);                                    \
        if (!is_kernel_addr((unsigned long)__gu_addr))          \
                might_fault();                                  \
        barrier_nospec();                                       \
        __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        long __gu_err = -EFAULT;                                        \
        __long_type(*(ptr)) __gu_val = 0;                               \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);                   \
        might_fault();                                                  \
        if (access_ok(__gu_addr, (size))) {                             \
                barrier_nospec();                                       \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        }                                                               \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
})

#define __get_user_nosleep(x, ptr, size)                        \
({                                                              \
        long __gu_err;                                          \
        __long_type(*(ptr)) __gu_val;                           \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
        __chk_user_ptr(ptr);                                    \
        barrier_nospec();                                       \
        __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;             \
        __gu_err;                                               \
})


/* more complex routines */

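/*
 * __copy_tofrom_user() is the low-level bulk copy routine (implemented
 * in assembly under arch/powerpc/lib).  Both pointers are declared
 * __user so the same routine can copy in either direction; kernel-side
 * pointers are cast with __force below.
 */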
extern unsigned long __copy_tofrom_user(void __user *to,
                const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        unsigned long ret;

        barrier_nospec();
        allow_user_access(to, from, n);
        ret = __copy_tofrom_user(to, from, n);
        prevent_user_access(to, from, n);
        return ret;
}
#endif /* __powerpc64__ */

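/*
 * For small constant sizes, inline a single fixed-size load instead of
 * calling out to __copy_tofrom_user(); on success (ret == 0) we are
 * done, otherwise fall back to the generic copy routine.
 */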
static inline unsigned long raw_copy_from_user(void *to,
                const void __user *from, unsigned long n)
{
        unsigned long ret;
        if (__builtin_constant_p(n) && (n <= 8)) {
                ret = 1;

                switch (n) {
                case 1:
                        barrier_nospec();
                        __get_user_size(*(u8 *)to, from, 1, ret);
                        break;
                case 2:
                        barrier_nospec();
                        __get_user_size(*(u16 *)to, from, 2, ret);
                        break;
                case 4:
                        barrier_nospec();
                        __get_user_size(*(u32 *)to, from, 4, ret);
                        break;
                case 8:
                        barrier_nospec();
                        __get_user_size(*(u64 *)to, from, 8, ret);
                        break;
                }
                if (ret == 0)
                        return 0;
        }

        barrier_nospec();
        allow_read_from_user(from, n);
        ret = __copy_tofrom_user((__force void __user *)to, from, n);
        prevent_read_from_user(from, n);
        return ret;
}

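/*
 * Same constant-size fast path as raw_copy_from_user(), but for the
 * store direction.
 */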
static inline unsigned long raw_copy_to_user(void __user *to,
                const void *from, unsigned long n)
{
        unsigned long ret;
        if (__builtin_constant_p(n) && (n <= 8)) {
                ret = 1;

                switch (n) {
                case 1:
                        __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
                        break;
                case 2:
                        __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
                        break;
                case 4:
                        __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
                        break;
                case 8:
                        __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
                        break;
                }
                if (ret == 0)
                        return 0;
        }

        allow_write_to_user(to, n);
        ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
        prevent_write_to_user(to, n);
        return ret;
}

extern unsigned long __clear_user(void __user *addr, unsigned long size);

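/*
 * Zero @size bytes of user memory at @addr.  Returns the number of
 * bytes that could not be cleared (0 on success).
 */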
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
        unsigned long ret = size;
        might_fault();
        if (likely(access_ok(addr, size))) {
                allow_write_to_user(addr, size);
                ret = __clear_user(addr, size);
                prevent_write_to_user(addr, size);
        }
        return ret;
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
                unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
                           size_t len);

#endif  /* _ARCH_POWERPC_UACCESS_H */