linux/arch/arm/include/asm/uaccess.h
<<
>>
Prefs
   1/*
   2 *  arch/arm/include/asm/uaccess.h
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License version 2 as
   6 * published by the Free Software Foundation.
   7 */
   8#ifndef _ASMARM_UACCESS_H
   9#define _ASMARM_UACCESS_H
  10
  11/*
  12 * User space memory access functions
  13 */
  14#include <linux/string.h>
  15#include <linux/thread_info.h>
  16#include <asm/errno.h>
  17#include <asm/memory.h>
  18#include <asm/domain.h>
  19#include <asm/unified.h>
  20#include <asm/compiler.h>
  21
  22#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  23#include <asm-generic/uaccess-unaligned.h>
  24#else
  25#define __get_user_unaligned __get_user
  26#define __put_user_unaligned __put_user
  27#endif
  28
  29#define VERIFY_READ 0
  30#define VERIFY_WRITE 1
  31
  32/*
  33 * The exception table consists of pairs of addresses: the first is the
  34 * address of an instruction that is allowed to fault, and the second is
  35 * the address at which the program should continue.  No registers are
  36 * modified, so it is entirely up to the continuation code to figure out
  37 * what to do.
  38 *
  39 * All the routines below use bits of fixup code that are out of line
  40 * with the main instruction path.  This means when everything is well,
  41 * we don't even have to jump over them.  Further, they do not intrude
  42 * on our cache or tlb entries.
  43 */
  44
/*
 * One entry per faulting instruction: 'insn' is the address of the
 * instruction allowed to fault, 'fixup' the address to resume at.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Attempt to fix up a kernel fault using the exception table. */
extern int fixup_exception(struct pt_regs *regs);
  51
/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000: the range check below is
 * done with 33-bit arithmetic, so a stored limit of 0 means the whole
 * 4GB address space is accessible.
 */
#define KERNEL_DS	0x00000000
#define get_ds()	(KERNEL_DS)
  64
  65#ifdef CONFIG_MMU
  66
#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

/*
 * Set the current address-space limit.  For KERNEL_DS (fs == 0) the
 * kernel domain is switched to manager mode so uaccess instructions
 * can reach kernel mappings; any other limit uses client mode.
 */
static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
  75
#define segment_eq(a, b)	((a) == (b))

/*
 * Non-zero if 'addr' is below the current address limit.  'flag'
 * starts out as the limit; the movlo (unsigned lower) clears it
 * when addr < limit, so (flag == 0) means the address is OK.
 */
#define __addr_ok(addr) ({ \
	unsigned long flag; \
	__asm__("cmp %2, %0; movlo %0, #0" \
		: "=&r" (flag) \
		: "0" (current_thread_info()->addr_limit), "r" (addr) \
		: "cc"); \
	(flag == 0); })
  85
/*
 * We use 33-bit arithmetic here: adds computes addr + size keeping
 * the carry, sbcccs then subtracts the limit (borrowing the 33rd
 * bit), and movcc clears 'flag' only when the whole range fits below
 * the limit.  Result is 0 when [addr, addr+size) is accessible,
 * non-zero otherwise.
 */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr);	\
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })
  95
  96/*
  97 * Single-value transfer routines.  They automatically use the right
  98 * size if we just have the right pointer type.  Note that the functions
  99 * which read from user space (*get_*) need to take care not to leak
 100 * kernel data even if the calling code is buggy and fails to check
 101 * the return value.  This means zeroing out the destination variable
 102 * or buffer on error.  Normally this is done out of line by the
 103 * fixup code, but there are a few places where it intrudes on the
 104 * main code path.  When we only write to user space, there is no
 105 * problem.
 106 */
/*
 * Out-of-line single-value fetch helpers (implemented in assembly).
 * The _32t_/_64t_ variants cope with a destination variable whose
 * width differs from the access size; see the wrapper macros below.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

/*
 * Registers clobbered by the helpers above.  The 2-byte helper also
 * clobbers ip when software domain switching is configured.
 */
#define __GUP_CLOBBER_1 "lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2 "ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4 "lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8 "lr", "cc"
 125
/*
 * Call the out-of-line helper for access size __s: pointer in r0,
 * limit in r1; the error code comes back in r0 (__e) and the value
 * in r2 (__r2).  The __asmeq checks assert the register bindings.
 */
#define __get_user_x(__r2, __p, __e, __l, __s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)			\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif
 159
 160
/*
 * Checked fetch from user space.  The out-of-line helpers take the
 * pointer in r0 and the last valid address (addr_limit - 1) in r1,
 * and hand back the error code in r0 and the value in r2 (r2+r3 for
 * 64-bit).  The size dispatch pairs the pointee size with the width
 * of the destination variable 'x' so that a narrow access into a
 * 64-bit variable (or vice versa) lands in the right register half.
 */
#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
		register typeof(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

/*
 * get_user(x, p): fetch *p into x with full address checking.
 * Returns 0 on success, -EFAULT on failure.  May page-fault and
 * sleep, hence the might_fault() annotation.
 */
#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	 })
 204
/* Out-of-line single-value store helpers (implemented in assembly). */
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

/*
 * Call the out-of-line store helper for size __s: pointer in r0,
 * value in r2 (r2+r3 for 64-bit), limit in r1; the error code is
 * returned in r0 (__e).
 */
#define __put_user_x(__r2, __p, __e, __l, __s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__put_user_" #__s				\
		: "=&r" (__e)						\
		: "0" (__p), "r" (__r2), "r" (__l)			\
		: "ip", "lr", "cc")
 218
/*
 * Checked store to user space.  'p' is evaluated once into __tmp_p
 * before the register variables are set up; the limit passed in r1
 * is the last valid address (addr_limit - 1).
 */
#define __put_user_check(x, p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		const typeof(*(p)) __user *__tmp_p = (p);		\
		register const typeof(*(p)) __r2 asm("r2") = (x);	\
		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			__put_user_x(__r2, __p, __e, __l, 1);		\
			break;						\
		case 2:							\
			__put_user_x(__r2, __p, __e, __l, 2);		\
			break;						\
		case 4:							\
			__put_user_x(__r2, __p, __e, __l, 4);		\
			break;						\
		case 8:							\
			__put_user_x(__r2, __p, __e, __l, 8);		\
			break;						\
		default: __e = __put_user_bad(); break;			\
		}							\
		__e;							\
	})

/*
 * put_user(x, p): store x to *p with full address checking.
 * Returns 0 on success, -EFAULT on failure.  May page-fault and
 * sleep, hence the might_fault() annotation.
 */
#define put_user(x, p)							\
	({								\
		might_fault();						\
		__put_user_check(x, p);					\
	 })
 250
 251#else /* CONFIG_MMU */
 252
/*
 * uClinux has only one addr space, so has simplified address limits:
 * every address is valid and every range check trivially succeeds.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a, b)		(1)
#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)
#define get_fs()		(KERNEL_DS)

/* No address limit to track, so set_fs() is a no-op. */
static inline void set_fs(mm_segment_t fs)
{
}

/* Without an MMU the checked and unchecked variants are the same. */
#define get_user(x, p)	__get_user(x, p)
#define put_user(x, p)	__put_user(x, p)
 269
 270#endif /* CONFIG_MMU */
 271
/* Non-zero when the range [addr, addr+size) passes the limit check. */
#define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)

/*
 * Highest user address reachable by the *_user string helpers:
 * unlimited under KERNEL_DS, otherwise the current segment limit.
 */
#define user_addr_max() \
	(segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
 276
 277/*
 278 * The "__xxx" versions of the user access functions do not verify the
 279 * address space - it must have been done previously with a separate
 280 * "access_ok()" call.
 281 *
 282 * The "xxx_error" versions set the third argument to EFAULT if an
 283 * error occurs, and leave it unchanged on success.  Note that these
 284 * versions are void (ie, don't return a value as such).
 285 */
/*
 * __get_user(x, ptr): unchecked fetch of *ptr into x.  The caller
 * must already have validated the address with access_ok().
 * Returns 0 on success, -EFAULT on fault.
 */
#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

/*
 * Like __get_user() but stores -EFAULT into 'err' on fault (err is
 * left unchanged on success); the expression itself is void.
 */
#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), err);				\
	(void) 0;							\
})

/*
 * Size dispatch for the unchecked fetch.  On a fault the asm fixup
 * zeroes __gu_val, so no stale kernel data can leak into 'x'.
 */
#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	switch (sizeof(*(ptr))) {					\
	case 1: __get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
	case 2: __get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
	case 4: __get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)
 313
/*
 * Load one byte from user space.  On a fault, the out-of-line fixup
 * in .text.fixup sets err to -EFAULT and zeroes the destination (so
 * no stale kernel data is leaked), then resumes at label 2.  The
 * faulting instruction is registered in __ex_table.
 */
#define __get_user_asm_byte(x, addr, err)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(ldrb) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

/*
 * Halfwords are assembled from two single-byte loads, so unaligned
 * addresses are handled byte-by-byte; the combining order depends on
 * endianness.
 */
#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif
 349
/*
 * Load one 32-bit word from user space; same fixup scheme as
 * __get_user_asm_byte (err = -EFAULT, destination zeroed on fault).
 */
#define __get_user_asm_word(x, addr, err)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(ldr) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")
 367
/*
 * __put_user(x, ptr): unchecked store of x to *ptr.  The caller must
 * already have validated the address with access_ok().
 * Returns 0 on success, -EFAULT on fault.
 */
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

/*
 * Like __put_user() but stores -EFAULT into 'err' on fault (err is
 * left unchanged on success); the expression itself is void.
 */
#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), err);				\
	(void) 0;							\
})

/* Size dispatch for the unchecked store; 8-byte stores supported. */
#define __put_user_err(x, ptr, err)					\
do {									\
	unsigned long __pu_addr = (unsigned long)(ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	switch (sizeof(*(ptr))) {					\
	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
	case 4: __put_user_asm_word(__pu_val, __pu_addr, err);	break;	\
	case 8: __put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
	default: __put_user_bad();					\
	}								\
} while (0)
 395
/*
 * Store one byte to user space.  On a fault the fixup sets err to
 * -EFAULT and resumes at label 2; no value needs zeroing for a
 * write, so the fixup is a single mov.
 */
#define __put_user_asm_byte(x, __pu_addr, err)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(strb) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

/*
 * Halfword stores are split into two byte stores so unaligned
 * addresses are handled byte-by-byte; which byte goes first depends
 * on endianness.
 */
#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif
 428
/*
 * Store one 32-bit word to user space; same fixup scheme as
 * __put_user_asm_byte.
 */
#define __put_user_asm_word(x, __pu_addr, err)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(str) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")
 445
/*
 * Select which half of the 64-bit operand %2 is stored at the lower
 * address: %Q is the least-significant word, %R the most-significant,
 * so the mapping flips with endianness.
 */
#ifndef __ARMEB__
#define __reg_oper0	"%R2"
#define __reg_oper1	"%Q2"
#else
#define __reg_oper0	"%Q2"
#define __reg_oper1	"%R2"
#endif

/*
 * Store a 64-bit value as two word stores; either store may fault,
 * so both are listed in the exception table.  %1 (the address) is an
 * in/out operand because the ARM variant post-increments it.
 */
#define __put_user_asm_dword(x, __pu_addr, err)			\
	__asm__ __volatile__(					\
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1]\n"	) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")
 474
 475
#ifdef CONFIG_MMU
/*
 * Out-of-line bulk copy/clear routines.  Each returns the number of
 * bytes NOT copied/cleared (0 on complete success).  The _std
 * variants are the plain (non-optimised) implementations.
 */
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
#else
/* No MMU: one flat address space, so plain mem* routines suffice. */
#define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
#endif
 487
 488static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 489{
 490        if (access_ok(VERIFY_READ, from, n))
 491                n = __copy_from_user(to, from, n);
 492        else /* security hole - plug it */
 493                memset(to, 0, n);
 494        return n;
 495}
 496
 497static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 498{
 499        if (access_ok(VERIFY_WRITE, to, n))
 500                n = __copy_to_user(to, from, n);
 501        return n;
 502}
 503
/* The in-atomic variants map to the same routines on ARM. */
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
 506
 507static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 508{
 509        if (access_ok(VERIFY_WRITE, to, n))
 510                n = __clear_user(to, n);
 511        return n;
 512}
 513
/* Out-of-line user-space string helpers; implemented elsewhere. */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
 518
 519#endif /* _ASMARM_UACCESS_H */
 520