linux/arch/arm/include/asm/uaccess.h
/*
 *  arch/arm/include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means that when everything goes
 * well, we don't even have to jump over them.  Further, they do not
 * intrude on our cache or TLB entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
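
/*
 * Illustrative sketch (not part of this header): conceptually, the
 * fault handler looks up the faulting pc in the sorted __ex_table and,
 * if an entry matches, resumes execution at its fixup address.  The
 * function and parameter names below are hypothetical; the real lookup
 * lives in the generic extable code.
 */
#if 0	/* example only */
static unsigned long example_search_fixup(const struct exception_table_entry *tbl,
					  int num, unsigned long pc)
{
	int i;

	/* the real code binary-searches; a linear scan shows the idea */
	for (i = 0; i < num; i++)
		if (tbl[i].insn == pc)
			return tbl[i].fixup;
	return 0;	/* no fixup registered: the fault is fatal */
}
#endif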

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that KERNEL_DS is effectively 0x1,0000,0000: the 33-bit
 * arithmetic in __range_ok() treats an address limit of zero as the
 * top of the 4GB address space.
 */
#define KERNEL_DS	0x00000000
#define get_ds()	(KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a,b)	((a) == (b))

#define __addr_ok(addr) ({ \
	unsigned long flag; \
	__asm__("cmp %2, %0; movlo %0, #0" \
		: "=&r" (flag) \
		: "0" (current_thread_info()->addr_limit), "r" (addr) \
		: "cc"); \
	(flag == 0); })

/*
 * We use 33-bit arithmetic here: the carry out of the 32-bit add lets
 * us treat an address limit of zero (KERNEL_DS) as 0x1,0000,0000.
 * A zero result means the range [addr, addr + size) is accessible.
 */
#define __range_ok(addr,size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr);	\
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })
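
/*
 * Rough C equivalent of the assembly check above (sketch only, for
 * explanation): the access [addr, addr + size) is OK when the 33-bit
 * sum addr + size does not exceed addr_limit, with a limit of zero
 * standing in for 0x1,0000,0000.  The helper name is hypothetical.
 */
#if 0	/* example only */
static inline unsigned long example_range_ok(unsigned long addr,
					     unsigned long size,
					     unsigned long limit)
{
	/* 64-bit arithmetic models the carry out of the 32-bit add */
	unsigned long long end = (unsigned long long)addr + size;
	unsigned long long lim = limit ? limit : 0x100000000ULL;

	return end <= lim ? 0 : limit;	/* 0 means OK, like "flag" above */
}
#endif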

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);

#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2	"lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"

#define __get_user_x(__r2,__p,__e,__l,__s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)

#define __get_user_check(x,p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
		register unsigned long __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			__get_user_x(__r2, __p, __e, __l, 1);		\
			break;						\
		case 2:							\
			__get_user_x(__r2, __p, __e, __l, 2);		\
			break;						\
		case 4:							\
			__get_user_x(__r2, __p, __e, __l, 4);		\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

#define get_user(x,p)							\
	({								\
		might_fault();						\
		__get_user_check(x,p);					\
	})
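
/*
 * Usage sketch (not part of this header): reading a single value from
 * a user pointer.  get_user() performs its own access check and
 * returns 0 on success or -EFAULT on a fault; the names below are
 * invented for illustration.
 */
#if 0	/* example only */
static int example_read_u32(unsigned int __user *uptr, unsigned int *out)
{
	unsigned int val;

	if (get_user(val, uptr))	/* val is zeroed on fault */
		return -EFAULT;
	*out = val;
	return 0;
}
#endif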

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_x(__r2,__p,__e,__l,__s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__put_user_" #__s				\
		: "=&r" (__e)						\
		: "0" (__p), "r" (__r2), "r" (__l)			\
		: "ip", "lr", "cc")

#define __put_user_check(x,p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register const typeof(*(p)) __r2 asm("r2") = (x);	\
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			__put_user_x(__r2, __p, __e, __l, 1);		\
			break;						\
		case 2:							\
			__put_user_x(__r2, __p, __e, __l, 2);		\
			break;						\
		case 4:							\
			__put_user_x(__r2, __p, __e, __l, 4);		\
			break;						\
		case 8:							\
			__put_user_x(__r2, __p, __e, __l, 8);		\
			break;						\
		default: __e = __put_user_bad(); break;			\
		}							\
		__e;							\
	})

#define put_user(x,p)							\
	({								\
		might_fault();						\
		__put_user_check(x,p);					\
	})
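
/*
 * Usage sketch (not part of this header): writing a single value back
 * to user space, e.g. from a hypothetical ioctl handler.  put_user()
 * returns 0 on success or -EFAULT; all names below are invented for
 * illustration.
 */
#if 0	/* example only */
static int example_report_status(unsigned int status, unsigned int __user *uarg)
{
	/* put_user() performs its own access check; no access_ok() needed */
	return put_user(status, uarg);
}
#endif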

#else /* CONFIG_MMU */

/*
 * uClinux has only one address space, so it has simplified address
 * limits.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a,b)		(1)
#define __addr_ok(addr)		((void)(addr),1)
#define __range_ok(addr,size)	((void)(addr),0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x,p)	__get_user(x,p)
#define put_user(x,p)	__put_user(x,p)

#endif /* CONFIG_MMU */

#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)

#define user_addr_max() \
	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e., they don't return a value as such).
 */
#define __get_user(x,ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x),(ptr),__gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x,ptr,err)					\
({									\
	__get_user_err((x),(ptr),err);					\
	(void) 0;							\
})
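
/*
 * Usage sketch (not part of this header): the void "_error" variants
 * let a caller accumulate one error flag across several transfers
 * after a single access_ok() check, as the signal-return paths do.
 * The struct and function names below are hypothetical.
 */
#if 0	/* example only */
struct example_frame {
	unsigned long r0, r1;
};

static int example_restore(const struct example_frame __user *frame,
			   unsigned long *r0, unsigned long *r1)
{
	int err = 0;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		return -EFAULT;
	__get_user_error(*r0, &frame->r0, err);
	__get_user_error(*r1, &frame->r1, err);	/* err sticks once set */
	return err;
}
#endif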

#define __get_user_err(x,ptr,err)					\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	switch (sizeof(*(ptr))) {					\
	case 1: __get_user_asm_byte(__gu_val,__gu_addr,err);	break;	\
	case 2: __get_user_asm_half(__gu_val,__gu_addr,err);	break;	\
	case 4: __get_user_asm_word(__gu_val,__gu_addr,err);	break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

#define __get_user_asm_byte(x,addr,err)				\
	__asm__ __volatile__(					\
	"1:	" TUSER(ldrb) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

#ifndef __ARMEB__
#define __get_user_asm_half(x,__gu_addr,err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x,__gu_addr,err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#define __get_user_asm_word(x,addr,err)				\
	__asm__ __volatile__(					\
	"1:	" TUSER(ldr) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

#define __put_user(x,ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_err((x),(ptr),__pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x,ptr,err)					\
({									\
	__put_user_err((x),(ptr),err);					\
	(void) 0;							\
})

#define __put_user_err(x,ptr,err)					\
do {									\
	unsigned long __pu_addr = (unsigned long)(ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	switch (sizeof(*(ptr))) {					\
	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);	break;	\
	case 2: __put_user_asm_half(__pu_val,__pu_addr,err);	break;	\
	case 4: __put_user_asm_word(__pu_val,__pu_addr,err);	break;	\
	case 8: __put_user_asm_dword(__pu_val,__pu_addr,err);	break;	\
	default: __put_user_bad();					\
	}								\
} while (0)

#define __put_user_asm_byte(x,__pu_addr,err)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(strb) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#ifndef __ARMEB__
#define __put_user_asm_half(x,__pu_addr,err)			\
({								\
	unsigned long __temp = (unsigned long)(x);		\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x,__pu_addr,err)			\
({								\
	unsigned long __temp = (unsigned long)(x);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif

#define __put_user_asm_word(x,__pu_addr,err)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(str) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#ifndef __ARMEB__
#define __reg_oper0	"%R2"
#define __reg_oper1	"%Q2"
#else
#define __reg_oper0	"%Q2"
#define __reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x,__pu_addr,err)			\
	__asm__ __volatile__(					\
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1]\n"	) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

#ifdef CONFIG_MMU
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
#else
#define __copy_from_user(to,from,n)	(memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to,from,n)	(memcpy((void __force *)to, from, n), 0)
#define __clear_user(addr,n)		(memset((void __force *)addr, 0, n), 0)
#endif

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else /* security hole - plug it: zero the buffer so stale kernel data can't leak */
		memset(to, 0, n);
	return n;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
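
/*
 * Usage sketch (not part of this header): bulk transfers return the
 * number of bytes that could NOT be copied, so zero means success.
 * The structure and function names below are hypothetical.
 */
#if 0	/* example only */
struct example_args {
	unsigned int in, out;
};

static int example_transact(struct example_args __user *uargs)
{
	struct example_args args;

	if (copy_from_user(&args, uargs, sizeof(args)))
		return -EFAULT;	/* a partial copy counts as failure here */
	args.out = args.in + 1;
	if (copy_to_user(uargs, &args, sizeof(args)))
		return -EFAULT;
	return 0;
}
#endif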

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */