linux/arch/powerpc/include/asm/uaccess.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>

#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define TASK_SIZE_MAX           TASK_SIZE_USER64
#endif

#include <asm-generic/access_ok.h>

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */
#define __put_user(x, ptr)                                      \
({                                                              \
        long __pu_err;                                          \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
        __typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x);  \
        __typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr));  \
                                                                \
        might_fault();                                          \
        do {                                                    \
                __label__ __pu_failed;                          \
                                                                \
                allow_write_to_user(__pu_addr, __pu_size);      \
                __put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed);      \
                prevent_write_to_user(__pu_addr, __pu_size);    \
                __pu_err = 0;                                   \
                break;                                          \
                                                                \
__pu_failed:                                                    \
                prevent_write_to_user(__pu_addr, __pu_size);    \
                __pu_err = -EFAULT;                             \
        } while (0);                                            \
                                                                \
        __pu_err;                                               \
})

#define put_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *_pu_addr = (ptr);                    \
                                                                        \
        access_ok(_pu_addr, sizeof(*(ptr))) ?                           \
                  __put_user(x, _pu_addr) : -EFAULT;                    \
})

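/*
 * Usage sketch (illustrative, not from this file; my_ioctl is a
 * hypothetical handler): put_user() does the access_ok() check itself,
 * while __put_user() may only be used after a prior access_ok().
 *
 *	static long my_ioctl(struct file *filp, unsigned int cmd,
 *			     unsigned long arg)
 *	{
 *		u32 val = 42;
 *
 *		if (put_user(val, (u32 __user *)arg))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
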
/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm_goto(x, addr, label, op)                 \
        asm_volatile_goto(                                      \
                "1:     " op "%U1%X1 %0,%1      # put_user\n"   \
                EX_TABLE(1b, %l2)                               \
                :                                               \
                : "r" (x), "m<>" (*addr)                        \
                :                                               \
                : label)

#ifdef __powerpc64__
#define __put_user_asm2_goto(x, ptr, label)                     \
        __put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label)                    \
        asm_volatile_goto(                                      \
                "1:     stw%X1 %0, %1\n"                        \
                "2:     stw%X1 %L0, %L1\n"                      \
                EX_TABLE(1b, %l2)                               \
                EX_TABLE(2b, %l2)                               \
                :                                               \
                : "r" (x), "m" (*addr)                          \
                :                                               \
                : label)
#endif /* __powerpc64__ */

#define __put_user_size_goto(x, ptr, size, label)               \
do {                                                            \
        __typeof__(*(ptr)) __user *__pus_addr = (ptr);          \
                                                                \
        switch (size) {                                         \
        case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break;        \
        case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break;        \
        case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break;        \
        case 8: __put_user_asm2_goto(x, __pus_addr, label); break;              \
        default: BUILD_BUG();                                   \
        }                                                       \
} while (0)

/*
 * This does an atomic, 16-byte-aligned, 128-bit load from userspace.
 * It is up to the caller to call enable_kernel_altivec() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)                \
        __asm__ __volatile__(                           \
                ".machine push\n"                       \
                ".machine altivec\n"                    \
                "1:     lvx  0,0,%1     # get user\n"   \
                "       stvx 0,0,%2     # put kernel\n" \
                ".machine pop\n"                        \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     li %0,%3\n"                     \
                "       b 2b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 3b)                        \
                : "=r" (err)                            \
                : "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

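/*
 * Call-pattern sketch (hypothetical caller; the real user lives in the
 * HMI emulation code): Altivec must be enabled and preemption disabled
 * around the access, and err must be initialized to 0.
 *
 *	u8 vbuf[16] __aligned(16);
 *	int err = 0;
 *
 *	preempt_disable();
 *	enable_kernel_altivec();
 *	__get_user_atomic_128_aligned(vbuf, uaddr, err);
 *	preempt_enable();
 *	if (err)
 *		return err;
 */
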
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __get_user_asm_goto(x, addr, label, op)                 \
        asm_volatile_goto(                                      \
                "1:     "op"%U1%X1 %0, %1       # get_user\n"   \
                EX_TABLE(1b, %l2)                               \
                : "=r" (x)                                      \
                : "m<>" (*addr)                                 \
                :                                               \
                : label)

#ifdef __powerpc64__
#define __get_user_asm2_goto(x, addr, label)                    \
        __get_user_asm_goto(x, addr, label, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label)                    \
        asm_volatile_goto(                                      \
                "1:     lwz%X1 %0, %1\n"                        \
                "2:     lwz%X1 %L0, %L1\n"                      \
                EX_TABLE(1b, %l2)                               \
                EX_TABLE(2b, %l2)                               \
                : "=&r" (x)                                     \
                : "m" (*addr)                                   \
                :                                               \
                : label)
#endif /* __powerpc64__ */

#define __get_user_size_goto(x, ptr, size, label)                               \
do {                                                                            \
        BUILD_BUG_ON(size > sizeof(x));                                         \
        switch (size) {                                                         \
        case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break;  \
        case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break; \
        case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break; \
        case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label);  break;      \
        default: x = 0; BUILD_BUG();                                            \
        }                                                                       \
} while (0)

#define __get_user_size_allowed(x, ptr, size, retval)                   \
do {                                                                    \
                __label__ __gus_failed;                                 \
                                                                        \
                __get_user_size_goto(x, ptr, size, __gus_failed);       \
                retval = 0;                                             \
                break;                                                  \
__gus_failed:                                                           \
                x = 0;                                                  \
                retval = -EFAULT;                                       \
} while (0)

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#define __get_user_asm(x, addr, err, op)                \
        __asm__ __volatile__(                           \
                "1:     "op"%U2%X2 %1, %2       # get_user\n"   \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     li %0,%3\n"                     \
                "       li %1,0\n"                      \
                "       b 2b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 3b)                        \
                : "=r" (err), "=r" (x)                  \
                : "m<>" (*addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)                   \
        __get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)                   \
        __asm__ __volatile__(                           \
                "1:     lwz%X2 %1, %2\n"                \
                "2:     lwz%X2 %L1, %L2\n"              \
                "3:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "4:     li %0,%3\n"                     \
                "       li %1,0\n"                      \
                "       li %1+1,0\n"                    \
                "       b 3b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 4b)                        \
                EX_TABLE(2b, 4b)                        \
                : "=r" (err), "=&r" (x)                 \
                : "m" (*addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval)           \
do {                                                            \
        retval = 0;                                             \
        BUILD_BUG_ON(size > sizeof(x));                         \
        switch (size) {                                         \
        case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break;      \
        case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break;     \
        case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break;     \
        case 8: __get_user_asm2(x, (u64 __user *)ptr, retval);  break;          \
        default: x = 0; BUILD_BUG();                            \
        }                                                       \
} while (0)

#define __get_user_size_goto(x, ptr, size, label)               \
do {                                                            \
        long __gus_retval;                                      \
                                                                \
        __get_user_size_allowed(x, ptr, size, __gus_retval);    \
        if (__gus_retval)                                       \
                goto label;                                     \
} while (0)

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

#define __get_user(x, ptr)                                      \
({                                                              \
        long __gu_err;                                          \
        __long_type(*(ptr)) __gu_val;                           \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
        __typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr));  \
                                                                \
        might_fault();                                          \
        allow_read_from_user(__gu_addr, __gu_size);             \
        __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err);      \
        prevent_read_from_user(__gu_addr, __gu_size);           \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
                                                                \
        __gu_err;                                               \
})

#define get_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *_gu_addr = (ptr);                    \
                                                                        \
        access_ok(_gu_addr, sizeof(*(ptr))) ?                           \
                  __get_user(x, _gu_addr) :                             \
                  ((x) = (__force __typeof__(*(ptr)))0, -EFAULT);       \
})

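/*
 * Usage sketch (illustrative; uarg is a hypothetical __user pointer):
 * get_user() performs the access_ok() check itself, zeroes the
 * destination on failure, and returns 0 or -EFAULT.
 *
 *	u64 flags;
 *
 *	if (get_user(flags, &uarg->flags))
 *		return -EFAULT;
 */
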
/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
                const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        unsigned long ret;

        allow_read_write_user(to, from, n);
        ret = __copy_tofrom_user(to, from, n);
        prevent_read_write_user(to, from, n);
        return ret;
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
                const void __user *from, unsigned long n)
{
        unsigned long ret;

        allow_read_from_user(from, n);
        ret = __copy_tofrom_user((__force void __user *)to, from, n);
        prevent_read_from_user(from, n);
        return ret;
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        unsigned long ret;

        allow_write_to_user(to, n);
        ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
        prevent_write_to_user(to, n);
        return ret;
}

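/*
 * These raw_* routines are the arch backends behind the generic
 * copy_{from,to}_user() wrappers in include/linux/uaccess.h; they assume
 * access_ok() was already done. A hedged sketch of the usual call site
 * (struct my_args is hypothetical):
 *
 *	struct my_args args;
 *
 *	if (copy_from_user(&args, uptr, sizeof(args)))
 *		return -EFAULT;	// nonzero return = bytes not copied
 */
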
unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
        unsigned long ret;

        might_fault();
        allow_write_to_user(addr, size);
        ret = __arch_clear_user(addr, size);
        prevent_write_to_user(addr, size);
        return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
        return likely(access_ok(addr, size)) ? __clear_user(addr, size) : size;
}

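/*
 * Sketch: zeroing a user buffer (ubuf/len are illustrative names).
 * clear_user() returns the number of bytes that could NOT be cleared,
 * so 0 means success.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */
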
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
        return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
        if (likely(check_copy_size(from, n, true))) {
                if (access_ok(to, n)) {
                        allow_write_to_user(to, n);
                        n = copy_mc_generic((void *)to, from, n);
                        prevent_write_to_user(to, n);
                }
        }

        return n;
}
#endif

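/*
 * copy_mc_to_user() mirrors copy_to_user() but uses the machine-check
 * tolerant copy routine. Hedged sketch (ubuf/kbuf/len are illustrative):
 *
 *	unsigned long left = copy_mc_to_user(ubuf, kbuf, len);
 *
 *	if (left)
 *		return -EFAULT;	// 'left' bytes were not copied
 */
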
extern long __copy_from_user_flushcache(void *dst, const void __user *src,
                unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
                size_t len);

static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr, len)))
                return false;

        might_fault();

        allow_read_write_user((void __user *)ptr, ptr, len);
        return true;
}
#define user_access_begin       user_access_begin
#define user_access_end         prevent_current_access_user
#define user_access_save        prevent_user_access_return
#define user_access_restore     restore_user_access

static __must_check inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr, len)))
                return false;

        might_fault();

        allow_read_from_user(ptr, len);
        return true;
}
#define user_read_access_begin  user_read_access_begin
#define user_read_access_end    prevent_current_read_from_user

static __must_check inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr, len)))
                return false;

        might_fault();

        allow_write_to_user((void __user *)ptr, len);
        return true;
}
#define user_write_access_begin user_write_access_begin
#define user_write_access_end   prevent_current_write_to_user

#define unsafe_get_user(x, p, e) do {                           \
        __long_type(*(p)) __gu_val;                             \
        __typeof__(*(p)) __user *__gu_addr = (p);               \
                                                                \
        __get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e); \
        (x) = (__typeof__(*(p)))__gu_val;                       \
} while (0)

#define unsafe_put_user(x, p, e) \
        __put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)

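/*
 * Usage sketch for the unsafe accessors (illustrative): they must be
 * bracketed by user_*_access_begin()/user_*_access_end(), and the error
 * label must also close the access window.
 *
 *	u32 val;
 *
 *	if (!user_read_access_begin(uptr, sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uptr, Efault);
 *	user_read_access_end();
 *	return 0;
 * Efault:
 *	user_read_access_end();
 *	return -EFAULT;
 */
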
#define unsafe_copy_from_user(d, s, l, e) \
do {                                                                                    \
        u8 *_dst = (u8 *)(d);                                                           \
        const u8 __user *_src = (const u8 __user *)(s);                                 \
        size_t _len = (l);                                                              \
        int _i;                                                                         \
                                                                                        \
        for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64))               \
                unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e);     \
        if (_len & 4) {                                                                 \
                unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e);     \
                _i += 4;                                                                \
        }                                                                               \
        if (_len & 2) {                                                                 \
                unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e);     \
                _i += 2;                                                                \
        }                                                                               \
        if (_len & 1)                                                                   \
                unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e);       \
} while (0)

#define unsafe_copy_to_user(d, s, l, e) \
do {                                                                    \
        u8 __user *_dst = (u8 __user *)(d);                             \
        const u8 *_src = (const u8 *)(s);                               \
        size_t _len = (l);                                              \
        int _i;                                                         \
                                                                        \
        for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64))       \
                unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \
        if (_len & 4) {                                                 \
                unsafe_put_user(*(u32 *)(_src + _i), (u32 __user *)(_dst + _i), e); \
                _i += 4;                                                \
        }                                                               \
        if (_len & 2) {                                                 \
                unsafe_put_user(*(u16 *)(_src + _i), (u16 __user *)(_dst + _i), e); \
                _i += 2;                                                \
        }                                                               \
        if (_len & 1)                                                   \
                unsafe_put_user(*(u8 *)(_src + _i), (u8 __user *)(_dst + _i), e); \
} while (0)

#define __get_kernel_nofault(dst, src, type, err_label)                 \
        __get_user_size_goto(*((type *)(dst)),                          \
                (__force type __user *)(src), sizeof(type), err_label)

#define __put_kernel_nofault(dst, src, type, err_label)                 \
        __put_user_size_goto(*((type *)(src)),                          \
                (__force type __user *)(dst), sizeof(type), err_label)

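/*
 * __get_kernel_nofault()/__put_kernel_nofault() are the arch hooks behind
 * the generic copy_{from,to}_kernel_nofault() helpers in mm/maccess.c.
 * Hedged sketch of a typical consumer (addr is an illustrative name):
 *
 *	unsigned long word;
 *
 *	if (copy_from_kernel_nofault(&word, (void *)addr, sizeof(word)))
 *		return -EFAULT;	// faults are caught, not oopsed
 */
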
#endif  /* _ARCH_POWERPC_UACCESS_H */