linux/arch/arm/include/asm/uaccess.h
/*
 *  arch/arm/include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/system.h>
#include <asm/unified.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
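
/*
 * Rough sketch of what the fault path does with this table (the real
 * fixup_exception() implementation lives elsewhere in arch/arm; this is
 * only an illustration):
 *
 *	const struct exception_table_entry *fixup;
 *
 *	fixup = search_exception_tables(instruction_pointer(regs));
 *	if (fixup)
 *		regs->ARM_pc = fixup->fixup;	// resume at the fixup code
 */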

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that a KERNEL_DS value of 0 effectively means 0x1,0000,0000:
 * the 33-bit range check below treats a zero limit as the top of the
 * 32-bit address space.
 */
#define KERNEL_DS       0x00000000
#define get_ds()        (KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS         TASK_SIZE
#define get_fs()        (current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
        current_thread_info()->addr_limit = fs;
        modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a,b) ((a) == (b))
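
/*
 * Illustrative sketch (not part of this header): code that temporarily
 * needs the user-access routines to accept kernel pointers saves and
 * widens the segment limit around the access, then restores it.
 * "kbuf", "len" and "do_user_read" are hypothetical names.
 *
 *	mm_segment_t old_fs = get_fs();
 *	int ret;
 *
 *	set_fs(KERNEL_DS);
 *	ret = do_user_read((char __user *)kbuf, len);
 *	set_fs(old_fs);
 */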

#define __addr_ok(addr) ({ \
        unsigned long flag; \
        __asm__("cmp %2, %0; movlo %0, #0" \
                : "=&r" (flag) \
                : "0" (current_thread_info()->addr_limit), "r" (addr) \
                : "cc"); \
        (flag == 0); })

/*
 * We use 33-bit arithmetic here: the carry out of "addr + size" is kept
 * so that a range which wraps past 4GB cannot pass the limit check.
 */
#define __range_ok(addr,size) ({ \
        unsigned long flag, roksum; \
        __chk_user_ptr(addr);   \
        __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
                : "=&r" (flag), "=&r" (roksum) \
                : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
                : "cc"); \
        flag; })
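
/*
 * Rough C equivalent of the check above (illustrative only; a limit of
 * 0, i.e. KERNEL_DS, always passes):
 *
 *	int ok = limit == 0 ||
 *		 (u64)(unsigned long)(addr) + (size) <= (u64)limit;
 *
 * __range_ok() itself evaluates to 0 when the range is acceptable.
 */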

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);

#define __get_user_x(__r2,__p,__e,__s,__i...)                           \
           __asm__ __volatile__ (                                       \
                __asmeq("%0", "r0") __asmeq("%1", "r2")                 \
                "bl     __get_user_" #__s                               \
                : "=&r" (__e), "=r" (__r2)                              \
                : "0" (__p)                                             \
                : __i, "cc")

#define get_user(x,p)                                                   \
        ({                                                              \
                register const typeof(*(p)) __user *__p asm("r0") = (p);\
                register unsigned long __r2 asm("r2");                  \
                register int __e asm("r0");                             \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
                        __get_user_x(__r2, __p, __e, 1, "lr");          \
                        break;                                          \
                case 2:                                                 \
                        __get_user_x(__r2, __p, __e, 2, "r3", "lr");    \
                        break;                                          \
                case 4:                                                 \
                        __get_user_x(__r2, __p, __e, 4, "lr");          \
                        break;                                          \
                default: __e = __get_user_bad(); break;                 \
                }                                                       \
                x = (typeof(*(p))) __r2;                                \
                __e;                                                    \
        })
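
/*
 * Illustrative usage (hypothetical names, not part of this header):
 * get_user() returns 0 on success and -EFAULT on failure.
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)arg))
 *		return -EFAULT;
 */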

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_x(__r2,__p,__e,__s)                                  \
           __asm__ __volatile__ (                                       \
                __asmeq("%0", "r0") __asmeq("%2", "r2")                 \
                "bl     __put_user_" #__s                               \
                : "=&r" (__e)                                           \
                : "0" (__p), "r" (__r2)                                 \
                : "ip", "lr", "cc")

#define put_user(x,p)                                                   \
        ({                                                              \
                register const typeof(*(p)) __r2 asm("r2") = (x);       \
                register const typeof(*(p)) __user *__p asm("r0") = (p);\
                register int __e asm("r0");                             \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
                        __put_user_x(__r2, __p, __e, 1);                \
                        break;                                          \
                case 2:                                                 \
                        __put_user_x(__r2, __p, __e, 2);                \
                        break;                                          \
                case 4:                                                 \
                        __put_user_x(__r2, __p, __e, 4);                \
                        break;                                          \
                case 8:                                                 \
                        __put_user_x(__r2, __p, __e, 8);                \
                        break;                                          \
                default: __e = __put_user_bad(); break;                 \
                }                                                       \
                __e;                                                    \
        })
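
/*
 * Illustrative usage (hypothetical names, not part of this header):
 *
 *	if (put_user(status, (u32 __user *)uptr))
 *		return -EFAULT;
 */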

#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS                 KERNEL_DS

#define segment_eq(a,b)         (1)
#define __addr_ok(addr)         (1)
#define __range_ok(addr,size)   (0)
#define get_fs()                (KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x,p)   __get_user(x,p)
#define put_user(x,p)   __put_user(x,p)

#endif /* CONFIG_MMU */

#define access_ok(type,addr,size)       (__range_ok(addr,size) == 0)

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - that must have been done beforehand with a separate
 * access_ok() call.
 *
 * The "xxx_error" versions set the third argument to -EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e. they don't return a value as such).
 */
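
/*
 * Illustrative pairing of access_ok() with the unchecked variants
 * (hypothetical names, not part of this header):
 *
 *	u32 val;
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(val)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 */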
#define __get_user(x,ptr)                                               \
({                                                                      \
        long __gu_err = 0;                                              \
        __get_user_err((x),(ptr),__gu_err);                             \
        __gu_err;                                                       \
})

#define __get_user_error(x,ptr,err)                                     \
({                                                                      \
        __get_user_err((x),(ptr),err);                                  \
        (void) 0;                                                       \
})

#define __get_user_err(x,ptr,err)                                       \
do {                                                                    \
        unsigned long __gu_addr = (unsigned long)(ptr);                 \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __get_user_asm_byte(__gu_val,__gu_addr,err);    break;  \
        case 2: __get_user_asm_half(__gu_val,__gu_addr,err);    break;  \
        case 4: __get_user_asm_word(__gu_val,__gu_addr,err);    break;  \
        default: (__gu_val) = __get_user_bad();                         \
        }                                                               \
        (x) = (__typeof__(*(ptr)))__gu_val;                             \
} while (0)

#define __get_user_asm_byte(x,addr,err)                         \
        __asm__ __volatile__(                                   \
        "1:     ldrbt   %1,[%2]\n"                              \
        "2:\n"                                                  \
        "       .section .fixup,\"ax\"\n"                       \
        "       .align  2\n"                                    \
        "3:     mov     %0, %3\n"                               \
        "       mov     %1, #0\n"                               \
        "       b       2b\n"                                   \
        "       .previous\n"                                    \
        "       .section __ex_table,\"a\"\n"                    \
        "       .align  3\n"                                    \
        "       .long   1b, 3b\n"                               \
        "       .previous"                                      \
        : "+r" (err), "=&r" (x)                                 \
        : "r" (addr), "i" (-EFAULT)                             \
        : "cc")
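
/*
 * In the sequence above, label 1 is the instruction allowed to fault,
 * label 3 is the out-of-line fixup that stores -EFAULT in %0 and zeroes
 * the destination, and the ".long 1b, 3b" pair in __ex_table is the
 * exception_table_entry tying the two together.  The half-word and
 * word accessors below follow the same pattern.
 */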

#ifndef __ARMEB__
#define __get_user_asm_half(x,__gu_addr,err)                    \
({                                                              \
        unsigned long __b1, __b2;                               \
        __get_user_asm_byte(__b1, __gu_addr, err);              \
        __get_user_asm_byte(__b2, __gu_addr + 1, err);          \
        (x) = __b1 | (__b2 << 8);                               \
})
#else
#define __get_user_asm_half(x,__gu_addr,err)                    \
({                                                              \
        unsigned long __b1, __b2;                               \
        __get_user_asm_byte(__b1, __gu_addr, err);              \
        __get_user_asm_byte(__b2, __gu_addr + 1, err);          \
        (x) = (__b1 << 8) | __b2;                               \
})
#endif

#define __get_user_asm_word(x,addr,err)                         \
        __asm__ __volatile__(                                   \
        "1:     ldrt    %1,[%2]\n"                              \
        "2:\n"                                                  \
        "       .section .fixup,\"ax\"\n"                       \
        "       .align  2\n"                                    \
        "3:     mov     %0, %3\n"                               \
        "       mov     %1, #0\n"                               \
        "       b       2b\n"                                   \
        "       .previous\n"                                    \
        "       .section __ex_table,\"a\"\n"                    \
        "       .align  3\n"                                    \
        "       .long   1b, 3b\n"                               \
        "       .previous"                                      \
        : "+r" (err), "=&r" (x)                                 \
        : "r" (addr), "i" (-EFAULT)                             \
        : "cc")

#define __put_user(x,ptr)                                               \
({                                                                      \
        long __pu_err = 0;                                              \
        __put_user_err((x),(ptr),__pu_err);                             \
        __pu_err;                                                       \
})

#define __put_user_error(x,ptr,err)                                     \
({                                                                      \
        __put_user_err((x),(ptr),err);                                  \
        (void) 0;                                                       \
})

#define __put_user_err(x,ptr,err)                                       \
do {                                                                    \
        unsigned long __pu_addr = (unsigned long)(ptr);                 \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);    break;  \
        case 2: __put_user_asm_half(__pu_val,__pu_addr,err);    break;  \
        case 4: __put_user_asm_word(__pu_val,__pu_addr,err);    break;  \
        case 8: __put_user_asm_dword(__pu_val,__pu_addr,err);   break;  \
        default: __put_user_bad();                                      \
        }                                                               \
} while (0)

#define __put_user_asm_byte(x,__pu_addr,err)                    \
        __asm__ __volatile__(                                   \
        "1:     strbt   %1,[%2]\n"                              \
        "2:\n"                                                  \
        "       .section .fixup,\"ax\"\n"                       \
        "       .align  2\n"                                    \
        "3:     mov     %0, %3\n"                               \
        "       b       2b\n"                                   \
        "       .previous\n"                                    \
        "       .section __ex_table,\"a\"\n"                    \
        "       .align  3\n"                                    \
        "       .long   1b, 3b\n"                               \
        "       .previous"                                      \
        : "+r" (err)                                            \
        : "r" (x), "r" (__pu_addr), "i" (-EFAULT)               \
        : "cc")

#ifndef __ARMEB__
#define __put_user_asm_half(x,__pu_addr,err)                    \
({                                                              \
        unsigned long __temp = (unsigned long)(x);              \
        __put_user_asm_byte(__temp, __pu_addr, err);            \
        __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);   \
})
#else
#define __put_user_asm_half(x,__pu_addr,err)                    \
({                                                              \
        unsigned long __temp = (unsigned long)(x);              \
        __put_user_asm_byte(__temp >> 8, __pu_addr, err);       \
        __put_user_asm_byte(__temp, __pu_addr + 1, err);        \
})
#endif

#define __put_user_asm_word(x,__pu_addr,err)                    \
        __asm__ __volatile__(                                   \
        "1:     strt    %1,[%2]\n"                              \
        "2:\n"                                                  \
        "       .section .fixup,\"ax\"\n"                       \
        "       .align  2\n"                                    \
        "3:     mov     %0, %3\n"                               \
        "       b       2b\n"                                   \
        "       .previous\n"                                    \
        "       .section __ex_table,\"a\"\n"                    \
        "       .align  3\n"                                    \
        "       .long   1b, 3b\n"                               \
        "       .previous"                                      \
        : "+r" (err)                                            \
        : "r" (x), "r" (__pu_addr), "i" (-EFAULT)               \
        : "cc")

#ifndef __ARMEB__
#define __reg_oper0     "%R2"
#define __reg_oper1     "%Q2"
#else
#define __reg_oper0     "%Q2"
#define __reg_oper1     "%R2"
#endif

#define __put_user_asm_dword(x,__pu_addr,err)                   \
        __asm__ __volatile__(                                   \
 ARM(   "1:     strt    " __reg_oper1 ", [%1], #4\n"    )       \
 ARM(   "2:     strt    " __reg_oper0 ", [%1]\n"        )       \
 THUMB( "1:     strt    " __reg_oper1 ", [%1]\n"        )       \
 THUMB( "2:     strt    " __reg_oper0 ", [%1, #4]\n"    )       \
        "3:\n"                                                  \
        "       .section .fixup,\"ax\"\n"                       \
        "       .align  2\n"                                    \
        "4:     mov     %0, %3\n"                               \
        "       b       3b\n"                                   \
        "       .previous\n"                                    \
        "       .section __ex_table,\"a\"\n"                    \
        "       .align  3\n"                                    \
        "       .long   1b, 4b\n"                               \
        "       .long   2b, 4b\n"                               \
        "       .previous"                                      \
        : "+r" (err), "+r" (__pu_addr)                          \
        : "r" (x), "i" (-EFAULT)                                \
        : "cc")


#ifdef CONFIG_MMU
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
#else
#define __copy_from_user(to,from,n)     (memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to,from,n)       (memcpy((void __force *)to, from, n), 0)
#define __clear_user(addr,n)            (memset((void __force *)addr, 0, n), 0)
#endif

extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
extern unsigned long __must_check __strnlen_user(const char __user *s, long n);

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n))
                n = __copy_from_user(to, from, n);
        else /* security hole - plug it */
                memset(to, 0, n);
        return n;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __copy_to_user(to, from, n);
        return n;
}
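
/*
 * Illustrative usage (hypothetical names, not part of this header): the
 * return value is the number of bytes that could NOT be copied, so any
 * non-zero result is treated as a fault.
 *
 *	struct foo kbuf;
 *
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(ubuf, &kbuf, sizeof(kbuf)))
 *		return -EFAULT;
 */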

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __clear_user(to, n);
        return n;
}

static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
{
        long res = -EFAULT;
        if (access_ok(VERIFY_READ, src, 1))
                res = __strncpy_from_user(dst, src, count);
        return res;
}

#define strlen_user(s)  strnlen_user(s, ~0UL >> 1)

static inline long __must_check strnlen_user(const char __user *s, long n)
{
        unsigned long res = 0;

        if (__addr_ok(s))
                res = __strnlen_user(s, n);

        return res;
}
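
/*
 * Illustrative usage (hypothetical names, not part of this header):
 * strncpy_from_user() returns the length of the copied string or a
 * negative error, and does not guarantee NUL termination when the
 * source is longer than "count", so the caller terminates explicitly.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *
 *	if (len < 0)
 *		return len;
 *	name[len] = '\0';
 */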

#endif /* _ASMARM_UACCESS_H */