linux/arch/cris/include/asm/uaccess.h
/*
 * Authors:    Bjorn Wesen (bjornw@axis.com)
 *             Hans-Peter Nilsson (hp@axis.com)
 */

/* The asm statements have been tweaked (within the domain of correctness)
   to give satisfactory results for "gcc version 2.96 20000427 (experimental)".

   Check regularly...

   Register $r9 is chosen for temporaries, being a call-clobbered register
   first in line to be used (notably for local blocks), not colliding with
   parameter registers.  */

#ifndef _CRIS_UACCESS_H
#define _CRIS_UACCESS_H

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ     0
#define VERIFY_WRITE    1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

/* addr_limit is the maximum accessible address for the task.  We misuse
 * the KERNEL_DS and USER_DS values to both assign and compare the
 * addr_limit values through the equally misnamed get/set_fs macros.
 * (see above; an illustrative sketch of the usual save/override/restore
 * pattern follows access_ok() below)
 */

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr, size) \
        (((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))

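/*
 * Illustrative sketch, not part of the original header: the usual pattern
 * that this addr_limit machinery enables.  A caller saves the current
 * limit, widens it with KERNEL_DS so that access_ok() accepts kernel
 * addresses too, and always restores it afterwards.  The function name is
 * made up for the example; on CRIS, access_ok() reduces to the
 * __kernel_ok / __user_ok checks above.
 */
static inline int uaccess_example_check_with_kernel_ds(const void *kaddr,
                                                       unsigned long len)
{
        mm_segment_t old_fs = get_fs();
        int ok;

        set_fs(KERNEL_DS);              /* lift addr_limit to 0xFFFFFFFF */
        ok = access_ok(VERIFY_READ, kaddr, len);
        set_fs(old_fs);                 /* always restore the saved limit */

        return ok;
}
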
#include <arch/uaccess.h>

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned long insn, fixup;
};

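/*
 * Illustrative sketch, not part of the original header: conceptually, on a
 * fault the handler looks the faulting instruction address up in this
 * table and, on a match, resumes execution at the recorded fixup address.
 * The table pointer and entry count below are hypothetical stand-ins for
 * the real (sorted) kernel table searched by the generic extable code.
 */
static inline unsigned long
uaccess_example_search_extable(const struct exception_table_entry *table,
                               unsigned long nentries,
                               unsigned long faulting_insn)
{
        unsigned long i;

        for (i = 0; i < nentries; i++)
                if (table[i].insn == faulting_insn)
                        return table[i].fixup;  /* continue here */

        return 0;       /* no fixup found: this is a genuine fault */
}
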
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to pass pointers around, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space; that must have been done previously with a separate
 * "access_ok()" call (this is used when we do multiple accesses to the
 * same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * CRIS, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * (An illustrative usage sketch follows __get_user_bad() below.)
 */
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern long __put_user_bad(void);

#define __put_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "move.b");               \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "move.w");               \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "move.d");               \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_64(x, ptr, retval);                      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

#define __get_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "move.b");               \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "move.w");               \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "move.d");               \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_64(x, ptr, retval);                      \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __put_user_nocheck(x, ptr, size)                \
({                                                      \
        long __pu_err;                                  \
        __put_user_size((x), (ptr), (size), __pu_err);  \
        __pu_err;                                       \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) *__pu_addr = (ptr);                          \
        if (access_ok(VERIFY_WRITE, __pu_addr, size))                   \
                __put_user_size((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                                       \
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __gu_err, __gu_val;                                \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__force __typeof__(*(ptr)))__gu_val;             \
        __gu_err;                                               \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        long __gu_err = -EFAULT, __gu_val = 0;                          \
        const __typeof__(*(ptr)) *__gu_addr = (ptr);                    \
        if (access_ok(VERIFY_READ, __gu_addr, size))                    \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
})

extern long __get_user_bad(void);

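/*
 * Illustrative sketch, not part of the original header: typical use of the
 * single-value transfer macros above.  The function name is made up; note
 * how the pointer type alone (here int, i.e. 4 bytes) selects the move.d
 * variant in __get_user_size()/__put_user_size(), and how the macros
 * return 0 or -EFAULT while the value travels through the "x" argument.
 */
static inline int uaccess_example_increment(int __user *uptr)
{
        int val;

        if (get_user(val, uptr))        /* checks access_ok() internally */
                return -EFAULT;

        return put_user(val + 1, uptr); /* 0 on success, -EFAULT on fault */
}
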
/* More complex functions.  Most are inline, but some call functions that
   live in lib/usercopy.c  */

extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
extern unsigned long __do_clear_user(void __user *to, unsigned long n);

static inline unsigned long
__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                return __copy_user(to, from, n);
        return n;
}

static inline unsigned long
__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n))
                return __copy_user_zeroing(to, from, n);
        return n;
}

static inline unsigned long
__generic_clear_user(void __user *to, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                return __do_clear_user(to, n);
        return n;
}

static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
        return __do_strncpy_from_user(dst, src, count);
}

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
        long res = -EFAULT;

        if (access_ok(VERIFY_READ, src, 1))
                res = __do_strncpy_from_user(dst, src, count);
        return res;
}

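/*
 * Illustrative sketch, not part of the original header: fetching a short
 * NUL-terminated string from user space.  The names are hypothetical.
 * strncpy_from_user() returns the string length on success and a negative
 * error on fault; it does not guarantee NUL termination when the buffer
 * fills up, hence the explicit terminator below.
 */
static inline long uaccess_example_get_name(char *kbuf, unsigned long ksize,
                                            const char __user *uname)
{
        long len = strncpy_from_user(kbuf, uname, ksize - 1);

        if (len < 0)
                return len;             /* typically -EFAULT */
        kbuf[len] = '\0';               /* len <= ksize - 1, so in bounds */
        return len;
}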

/* Note that these expand awfully if made into switch constructs, so
   don't do that.  */

static inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long ret = 0;

        if (n == 0)
                ;
        else if (n == 1)
                __asm_copy_from_user_1(to, from, ret);
        else if (n == 2)
                __asm_copy_from_user_2(to, from, ret);
        else if (n == 3)
                __asm_copy_from_user_3(to, from, ret);
        else if (n == 4)
                __asm_copy_from_user_4(to, from, ret);
        else if (n == 5)
                __asm_copy_from_user_5(to, from, ret);
        else if (n == 6)
                __asm_copy_from_user_6(to, from, ret);
        else if (n == 7)
                __asm_copy_from_user_7(to, from, ret);
        else if (n == 8)
                __asm_copy_from_user_8(to, from, ret);
        else if (n == 9)
                __asm_copy_from_user_9(to, from, ret);
        else if (n == 10)
                __asm_copy_from_user_10(to, from, ret);
        else if (n == 11)
                __asm_copy_from_user_11(to, from, ret);
        else if (n == 12)
                __asm_copy_from_user_12(to, from, ret);
        else if (n == 13)
                __asm_copy_from_user_13(to, from, ret);
        else if (n == 14)
                __asm_copy_from_user_14(to, from, ret);
        else if (n == 15)
                __asm_copy_from_user_15(to, from, ret);
        else if (n == 16)
                __asm_copy_from_user_16(to, from, ret);
        else if (n == 20)
                __asm_copy_from_user_20(to, from, ret);
        else if (n == 24)
                __asm_copy_from_user_24(to, from, ret);
        else
                ret = __generic_copy_from_user(to, from, n);

        return ret;
}

/* Ditto, don't make a switch out of this.  */

static inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        unsigned long ret = 0;

        if (n == 0)
                ;
        else if (n == 1)
                __asm_copy_to_user_1(to, from, ret);
        else if (n == 2)
                __asm_copy_to_user_2(to, from, ret);
        else if (n == 3)
                __asm_copy_to_user_3(to, from, ret);
        else if (n == 4)
                __asm_copy_to_user_4(to, from, ret);
        else if (n == 5)
                __asm_copy_to_user_5(to, from, ret);
        else if (n == 6)
                __asm_copy_to_user_6(to, from, ret);
        else if (n == 7)
                __asm_copy_to_user_7(to, from, ret);
        else if (n == 8)
                __asm_copy_to_user_8(to, from, ret);
        else if (n == 9)
                __asm_copy_to_user_9(to, from, ret);
        else if (n == 10)
                __asm_copy_to_user_10(to, from, ret);
        else if (n == 11)
                __asm_copy_to_user_11(to, from, ret);
        else if (n == 12)
                __asm_copy_to_user_12(to, from, ret);
        else if (n == 13)
                __asm_copy_to_user_13(to, from, ret);
        else if (n == 14)
                __asm_copy_to_user_14(to, from, ret);
        else if (n == 15)
                __asm_copy_to_user_15(to, from, ret);
        else if (n == 16)
                __asm_copy_to_user_16(to, from, ret);
        else if (n == 20)
                __asm_copy_to_user_20(to, from, ret);
        else if (n == 24)
                __asm_copy_to_user_24(to, from, ret);
        else
                ret = __generic_copy_to_user(to, from, n);

        return ret;
}

/* No switch, please.  */

static inline unsigned long
__constant_clear_user(void __user *to, unsigned long n)
{
        unsigned long ret = 0;

        if (n == 0)
                ;
        else if (n == 1)
                __asm_clear_1(to, ret);
        else if (n == 2)
                __asm_clear_2(to, ret);
        else if (n == 3)
                __asm_clear_3(to, ret);
        else if (n == 4)
                __asm_clear_4(to, ret);
        else if (n == 8)
                __asm_clear_8(to, ret);
        else if (n == 12)
                __asm_clear_12(to, ret);
        else if (n == 16)
                __asm_clear_16(to, ret);
        else if (n == 20)
                __asm_clear_20(to, ret);
        else if (n == 24)
                __asm_clear_24(to, ret);
        else
                ret = __generic_clear_user(to, n);

        return ret;
}

#define clear_user(to, n)                               \
        (__builtin_constant_p(n) ?                      \
         __constant_clear_user(to, n) :                 \
         __generic_clear_user(to, n))

#define copy_from_user(to, from, n)                     \
        (__builtin_constant_p(n) ?                      \
         __constant_copy_from_user(to, from, n) :       \
         __generic_copy_from_user(to, from, n))

#define copy_to_user(to, from, n)                       \
        (__builtin_constant_p(n) ?                      \
         __constant_copy_to_user(to, from, n) :         \
         __generic_copy_to_user(to, from, n))

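/*
 * Illustrative sketch, not part of the original header: a typical
 * copy-in/modify/copy-out sequence as a caller might write it.  The
 * structure and function names are hypothetical.  Because sizeof() is a
 * compile-time constant, the macros above dispatch to the unrolled
 * __constant_copy_*_user() helpers; both copy routines return the number
 * of bytes that could NOT be copied, so non-zero means failure.
 */
struct uaccess_example_args {
        unsigned long in;
        unsigned long out;
};

static inline long
uaccess_example_copy_inout(struct uaccess_example_args __user *uarg)
{
        struct uaccess_example_args karg;

        if (copy_from_user(&karg, uarg, sizeof(karg)))
                return -EFAULT;
        karg.out = karg.in + 1;
        if (copy_to_user(uarg, &karg, sizeof(karg)))
                return -EFAULT;
        return 0;
}
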
/* We keep the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 * (An illustrative sketch of the check-once-then-__copy pattern follows
 * __clear_user() below.)
 */

static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void __user *from,
                                 unsigned long n)
{
        return __copy_user_zeroing(to, from, n);
}

static inline unsigned long
__generic_copy_to_user_nocheck(void __user *to, const void *from,
                               unsigned long n)
{
        return __copy_user(to, from, n);
}

static inline unsigned long
__generic_clear_user_nocheck(void __user *to, unsigned long n)
{
        return __do_clear_user(to, n);
}

/* without checking */

#define __copy_to_user(to, from, n) \
        __generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user(to, from, n) \
        __generic_copy_from_user_nocheck((to), (from), (n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define __clear_user(to, n) __generic_clear_user_nocheck((to), (n))

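/*
 * Illustrative sketch, not part of the original header: when several
 * accesses hit the same user buffer, the range can be validated once with
 * access_ok() and the cheaper unchecked variants used afterwards.  The
 * function name is hypothetical.
 */
static inline long
uaccess_example_sum_words(const unsigned int __user *uvec,
                          unsigned long count,
                          unsigned int __user *usum)
{
        unsigned int sum = 0, val;
        unsigned long i;

        if (!access_ok(VERIFY_READ, uvec, count * sizeof(*uvec)) ||
            !access_ok(VERIFY_WRITE, usum, sizeof(*usum)))
                return -EFAULT;

        for (i = 0; i < count; i++) {
                if (__get_user(val, uvec + i))  /* range checked above */
                        return -EFAULT;
                sum += val;
        }

        return __put_user(sum, usum);           /* 0 or -EFAULT */
}
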
#define strlen_user(str)        strnlen_user((str), 0x7ffffffe)

#endif  /* __ASSEMBLY__ */

#endif  /* _CRIS_UACCESS_H */