/*
 *  arch/arm/include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel cannot inadvertently
 * perform such accesses (e.g. via list poison values) which could then
 * be exploited for privilege escalation.
 */
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
        unsigned int old_domain = get_domain();

        /* Set the current domain access to permit user accesses */
        set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
                   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

        return old_domain;
#else
        return 0;
#endif
}

static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
        /* Restore the user access mask */
        set_domain(flags);
#endif
}
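
/*
 * Illustrative sketch (not part of the original header): every accessor
 * below brackets the actual user-space load or store with this pair, so
 * that user mappings are only accessible for the duration of the
 * access.  The function name is hypothetical.
 */
static inline void uaccess_bracket_example(void)
{
        unsigned int __ua_flags = uaccess_save_and_enable();

        /* ... perform the user-space load or store here ... */

        uaccess_restore(__ua_flags);
}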

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS       0x00000000
#define get_ds()        (KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS         TASK_SIZE
#define get_fs()        (current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
        current_thread_info()->addr_limit = fs;
        modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a, b)        ((a) == (b))

/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
        unsigned long flag, roksum; \
        __chk_user_ptr(addr);   \
        __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
                : "=&r" (flag), "=&r" (roksum) \
                : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
                : "cc"); \
        flag; })
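
/*
 * Sketch (not in the original): approximately what the 33-bit check
 * above computes, in plain C, for the usual case of a non-zero
 * addr_limit.  A 64-bit sum stands in for the carry flag so that
 * "addr + size" cannot wrap.  Returns 0 when the whole range fits
 * below the limit, which is the convention access_ok() expects.
 */
static inline unsigned long __range_ok_sketch(unsigned long addr,
                                              unsigned long size)
{
        unsigned long long sum = (unsigned long long)addr + size;

        return sum <= current_thread_info()->addr_limit ? 0 : 1;
}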

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1 "lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2 "ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4 "lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8 "lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s)                          \
           __asm__ __volatile__ (                                       \
                __asmeq("%0", "r0") __asmeq("%1", "r2")                 \
                __asmeq("%3", "r1")                                     \
                "bl     __get_user_" #__s                               \
                : "=&r" (__e), "=r" (__r2)                              \
                : "0" (__p), "r" (__l)                                  \
                : __GUP_CLOBBER_##__s)

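/*
 * Note: the out-of-line __get_user_<size> helpers called above use a
 * fixed register convention, visible in the asm constraints: the user
 * pointer goes in and the error code comes back in r0, the address
 * limit is passed in r1, and the loaded value is returned in r2
 * (r2/r3 for 64-bit results).  The __asmeq() markers assert at build
 * time that the compiler really allocated those registers.
 */
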
/* narrowing a double-word get into a single 32-bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)                      \
        __get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * Store the result into the least significant word of a 64-bit target
 * variable.  This differs only in the big-endian case, where the LSW of
 * the 64-bit __r2 register pair is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)                      \
           __asm__ __volatile__ (                                       \
                __asmeq("%0", "r0") __asmeq("%1", "r2")                 \
                __asmeq("%3", "r1")                                     \
                "bl     __get_user_64t_" #__s                           \
                : "=&r" (__e), "=r" (__r2)                              \
                : "0" (__p), "r" (__l)                                  \
                : __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif


#define __get_user_check(x, p)                                          \
        ({                                                              \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
                register const typeof(*(p)) __user *__p asm("r0") = (p);\
                register typeof(x) __r2 asm("r2");                      \
                register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
                unsigned int __ua_flags = uaccess_save_and_enable();    \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
                        if (sizeof((x)) >= 8)                           \
                                __get_user_x_64t(__r2, __p, __e, __l, 1); \
                        else                                            \
                                __get_user_x(__r2, __p, __e, __l, 1);   \
                        break;                                          \
                case 2:                                                 \
                        if (sizeof((x)) >= 8)                           \
                                __get_user_x_64t(__r2, __p, __e, __l, 2); \
                        else                                            \
                                __get_user_x(__r2, __p, __e, __l, 2);   \
                        break;                                          \
                case 4:                                                 \
                        if (sizeof((x)) >= 8)                           \
                                __get_user_x_64t(__r2, __p, __e, __l, 4); \
                        else                                            \
                                __get_user_x(__r2, __p, __e, __l, 4);   \
                        break;                                          \
                case 8:                                                 \
                        if (sizeof((x)) < 8)                            \
                                __get_user_x_32t(__r2, __p, __e, __l, 4); \
                        else                                            \
                                __get_user_x(__r2, __p, __e, __l, 8);   \
                        break;                                          \
                default: __e = __get_user_bad(); break;                 \
                }                                                       \
                uaccess_restore(__ua_flags);                            \
                x = (typeof(*(p))) __r2;                                \
                __e;                                                    \
        })

#define get_user(x, p)                                                  \
        ({                                                              \
                might_fault();                                          \
                __get_user_check(x, p);                                 \
        })

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)                   \
        ({                                                              \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
                register typeof(__pu_val) __r2 asm("r2") = __pu_val;    \
                register const void __user *__p asm("r0") = __ptr;      \
                register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
                __asm__ __volatile__ (                                  \
                        __asmeq("%0", "r0") __asmeq("%2", "r2")         \
                        __asmeq("%3", "r1")                             \
                        "bl     __put_user_" #__s                       \
                        : "=&r" (__e)                                   \
                        : "0" (__p), "r" (__r2), "r" (__l)              \
                        : "ip", "lr", "cc");                            \
                __err = __e;                                            \
        })

#else /* CONFIG_MMU */

/*
 * uClinux has only one address space, so the address limits are
 * simplified.
 */
#define USER_DS                 KERNEL_DS

#define segment_eq(a, b)                (1)
#define __addr_ok(addr)         ((void)(addr), 1)
#define __range_ok(addr, size)  ((void)(addr), 0)
#define get_fs()                (KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x, p)  __get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(type, addr, size)     (__range_ok(addr, size) == 0)

#define user_addr_max() \
        (uaccess_kernel() ? ~0UL : get_fs())

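/*
 * Usage sketch (not part of the original header): callers must check
 * the return value of get_user()/put_user(), which is 0 on success and
 * -EFAULT when the user address is bad.  On failure the destination is
 * zeroed, per the comment above the __get_user_* helpers.  The function
 * name below is hypothetical.
 */
static inline int example_read_user_flag(const unsigned int __user *uptr,
                                         unsigned int *out)
{
        unsigned int val;

        if (get_user(val, uptr))        /* fault: val was zeroed */
                return -EFAULT;
        *out = val;
        return 0;
}
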
/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to -EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e. they don't return a value as such).
 */
#define __get_user(x, ptr)                                              \
({                                                                      \
        long __gu_err = 0;                                              \
        __get_user_err((x), (ptr), __gu_err);                           \
        __gu_err;                                                       \
})

#define __get_user_error(x, ptr, err)                                   \
({                                                                      \
        __get_user_err((x), (ptr), err);                                \
        (void) 0;                                                       \
})

#define __get_user_err(x, ptr, err)                                     \
do {                                                                    \
        unsigned long __gu_addr = (unsigned long)(ptr);                 \
        unsigned long __gu_val;                                         \
        unsigned int __ua_flags;                                        \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        __ua_flags = uaccess_save_and_enable();                         \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __get_user_asm_byte(__gu_val, __gu_addr, err);  break;  \
        case 2: __get_user_asm_half(__gu_val, __gu_addr, err);  break;  \
        case 4: __get_user_asm_word(__gu_val, __gu_addr, err);  break;  \
        default: (__gu_val) = __get_user_bad();                         \
        }                                                               \
        uaccess_restore(__ua_flags);                                    \
        (x) = (__typeof__(*(ptr)))__gu_val;                             \
} while (0)

#define __get_user_asm(x, addr, err, instr)                     \
        __asm__ __volatile__(                                   \
        "1:     " TUSER(instr) " %1, [%2], #0\n"                \
        "2:\n"                                                  \
        "       .pushsection .text.fixup,\"ax\"\n"              \
        "       .align  2\n"                                    \
        "3:     mov     %0, %3\n"                               \
        "       mov     %1, #0\n"                               \
        "       b       2b\n"                                   \
        "       .popsection\n"                                  \
        "       .pushsection __ex_table,\"a\"\n"                \
        "       .align  3\n"                                    \
        "       .long   1b, 3b\n"                               \
        "       .popsection"                                    \
        : "+r" (err), "=&r" (x)                                 \
        : "r" (addr), "i" (-EFAULT)                             \
        : "cc")

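/*
 * Note: the .pushsection directives above implement the kernel's
 * exception-table fixup.  The faultable load at "1:" gets a __ex_table
 * entry pairing its address with the landing pad at "3:" in
 * .text.fixup.  If the load faults, the fault handler looks the
 * faulting PC up in the table and resumes at "3:", whose code sets the
 * error to -EFAULT, zeroes the destination register so kernel data
 * cannot leak, and branches back to "2:" as if the access had
 * completed.
 */
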
#define __get_user_asm_byte(x, addr, err)                       \
        __get_user_asm(x, addr, err, ldrb)

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err)                  \
({                                                              \
        unsigned long __b1, __b2;                               \
        __get_user_asm_byte(__b1, __gu_addr, err);              \
        __get_user_asm_byte(__b2, __gu_addr + 1, err);          \
        (x) = __b1 | (__b2 << 8);                               \
})
#else
#define __get_user_asm_half(x, __gu_addr, err)                  \
({                                                              \
        unsigned long __b1, __b2;                               \
        __get_user_asm_byte(__b1, __gu_addr, err);              \
        __get_user_asm_byte(__b2, __gu_addr + 1, err);          \
        (x) = (__b1 << 8) | __b2;                               \
})
#endif

#define __get_user_asm_word(x, addr, err)                       \
        __get_user_asm(x, addr, err, ldr)


#define __put_user_switch(x, ptr, __err, __fn)                          \
        do {                                                            \
                const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);      \
                __typeof__(*(ptr)) __pu_val = (x);                      \
                unsigned int __ua_flags;                                \
                might_fault();                                          \
                __ua_flags = uaccess_save_and_enable();                 \
                switch (sizeof(*(ptr))) {                               \
                case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;      \
                case 2: __fn(__pu_val, __pu_ptr, __err, 2); break;      \
                case 4: __fn(__pu_val, __pu_ptr, __err, 4); break;      \
                case 8: __fn(__pu_val, __pu_ptr, __err, 8); break;      \
                default: __err = __put_user_bad(); break;               \
                }                                                       \
                uaccess_restore(__ua_flags);                            \
        } while (0)

#define put_user(x, ptr)                                                \
({                                                                      \
        int __pu_err = 0;                                               \
        __put_user_switch((x), (ptr), __pu_err, __put_user_check);      \
        __pu_err;                                                       \
})

#define __put_user(x, ptr)                                              \
({                                                                      \
        long __pu_err = 0;                                              \
        __put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);    \
        __pu_err;                                                       \
})

#define __put_user_error(x, ptr, err)                                   \
({                                                                      \
        __put_user_switch((x), (ptr), (err), __put_user_nocheck);       \
        (void) 0;                                                       \
})

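/*
 * Usage sketch (not part of the original header): put_user() performs
 * its own limit check, while __put_user() relies on an earlier
 * access_ok() by the caller.  The function name below is hypothetical.
 */
static inline int example_write_user_flag(unsigned int __user *uptr,
                                          unsigned int val)
{
        return put_user(val, uptr);     /* 0 on success, -EFAULT on fault */
}
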
#define __put_user_nocheck(x, __pu_ptr, __err, __size)                  \
        do {                                                            \
                unsigned long __pu_addr = (unsigned long)__pu_ptr;      \
                __put_user_nocheck_##__size(x, __pu_addr, __err);       \
        } while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#define __put_user_asm(x, __pu_addr, err, instr)                \
        __asm__ __volatile__(                                   \
        "1:     " TUSER(instr) " %1, [%2], #0\n"                \
        "2:\n"                                                  \
        "       .pushsection .text.fixup,\"ax\"\n"              \
        "       .align  2\n"                                    \
        "3:     mov     %0, %3\n"                               \
        "       b       2b\n"                                   \
        "       .popsection\n"                                  \
        "       .pushsection __ex_table,\"a\"\n"                \
        "       .align  3\n"                                    \
        "       .long   1b, 3b\n"                               \
        "       .popsection"                                    \
        : "+r" (err)                                            \
        : "r" (x), "r" (__pu_addr), "i" (-EFAULT)               \
        : "cc")

#define __put_user_asm_byte(x, __pu_addr, err)                  \
        __put_user_asm(x, __pu_addr, err, strb)

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err)                  \
({                                                              \
        unsigned long __temp = (__force unsigned long)(x);      \
        __put_user_asm_byte(__temp, __pu_addr, err);            \
        __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);   \
})
#else
#define __put_user_asm_half(x, __pu_addr, err)                  \
({                                                              \
        unsigned long __temp = (__force unsigned long)(x);      \
        __put_user_asm_byte(__temp >> 8, __pu_addr, err);       \
        __put_user_asm_byte(__temp, __pu_addr + 1, err);        \
})
#endif

#define __put_user_asm_word(x, __pu_addr, err)                  \
        __put_user_asm(x, __pu_addr, err, str)

#ifndef __ARMEB__
#define __reg_oper0     "%R2"
#define __reg_oper1     "%Q2"
#else
#define __reg_oper0     "%Q2"
#define __reg_oper1     "%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err)                 \
        __asm__ __volatile__(                                   \
 ARM(   "1:     " TUSER(str) "  " __reg_oper1 ", [%1], #4\n"    ) \
 ARM(   "2:     " TUSER(str) "  " __reg_oper0 ", [%1]\n"        ) \
 THUMB( "1:     " TUSER(str) "  " __reg_oper1 ", [%1]\n"        ) \
 THUMB( "2:     " TUSER(str) "  " __reg_oper0 ", [%1, #4]\n"    ) \
        "3:\n"                                                  \
        "       .pushsection .text.fixup,\"ax\"\n"              \
        "       .align  2\n"                                    \
        "4:     mov     %0, %3\n"                               \
        "       b       3b\n"                                   \
        "       .popsection\n"                                  \
        "       .pushsection __ex_table,\"a\"\n"                \
        "       .align  3\n"                                    \
        "       .long   1b, 4b\n"                               \
        "       .long   2b, 4b\n"                               \
        "       .popsection"                                    \
        : "+r" (err), "+r" (__pu_addr)                          \
        : "r" (x), "i" (-EFAULT)                                \
        : "cc")


#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned int __ua_flags;

        __ua_flags = uaccess_save_and_enable();
        n = arm_copy_from_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
}

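/*
 * Sketch (not in the original): roughly how the generic
 * copy_from_user() in <linux/uaccess.h> builds on raw_copy_from_user():
 * the range is checked first, and any uncopied tail is zeroed so a
 * short copy can never leak stale kernel memory to the caller.  The
 * function name is hypothetical.
 */
static inline unsigned long
copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;

        if (access_ok(VERIFY_READ, from, n))
                res = raw_copy_from_user(to, from, n);
        if (unlikely(res))                      /* zero the uncopied tail */
                memset(to + (n - res), 0, res);
        return res;
}
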
extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
        unsigned int __ua_flags;
        __ua_flags = uaccess_save_and_enable();
        n = arm_copy_to_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
#else
        return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
        unsigned int __ua_flags = uaccess_save_and_enable();
        n = arm_clear_user(addr, n);
        uaccess_restore(__ua_flags);
        return n;
}

#else
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        memcpy(to, (const void __force *)from, n);
        return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        memcpy((void __force *)to, from, n);
        return 0;
}
#define __clear_user(addr, n)           (memset((void __force *)addr, 0, n), 0)
#endif
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __clear_user(to, n);
        return n;
}

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */