linux/arch/cris/include/asm/uaccess.h
/*
 * Authors:    Bjorn Wesen (bjornw@axis.com)
 *             Hans-Peter Nilsson (hp@axis.com)
 */

/* The asm statements have been tweaked (within the domain of correctness)
   to give satisfactory results for "gcc version 2.96 20000427 (experimental)".

   Check the generated code regularly when changing compilers...

   Register $r9 is chosen for temporaries, being a call-clobbered register
   first in line to be used (notably for local blocks), not colliding with
   parameter registers.  */

#ifndef _CRIS_UACCESS_H
#define _CRIS_UACCESS_H

#include <asm/processor.h>
#include <asm/page.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

/* addr_limit is the maximum accessible address for the task.  We misuse
 * the KERNEL_DS and USER_DS values to both assign and compare the
 * addr_limit values through the equally misnamed get/set_fs macros.
 * (see above)
 */

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))
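
/*
 * Usage sketch (illustrative, not part of this header): callers elsewhere
 * in the kernel that temporarily lift the address-space limit around an
 * access to a kernel buffer traditionally follow roughly this pattern:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... access a kernel pointer through the user-access helpers ...
 *	set_fs(old_fs);
 *
 * Only the macros defined above are assumed here.
 */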

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define __kernel_ok (uaccess_kernel())
#define __user_ok(addr, size) \
        (((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
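
/*
 * Note: __user_ok() checks "addr <= TASK_SIZE - size" (with size already
 * known to be <= TASK_SIZE) instead of "addr + size <= TASK_SIZE", so a
 * huge size cannot wrap the sum around and defeat the limit check.
 */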

#include <arch/uaccess.h>
#include <asm/extable.h>

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to use pointers for that, because it is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * CRIS, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
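
/*
 * Illustrative example (hypothetical caller, not taken from this file):
 * get_user()/put_user() perform the access_ok() check themselves, while
 * the "__" variants rely on the caller having done it:
 *
 *	unsigned int a, b;
 *	int err;
 *
 *	err = get_user(a, uptr);                 // checks access_ok()
 *
 *	if (access_ok(VERIFY_READ, uptr, 2 * sizeof(*uptr))) {
 *		err  = __get_user(a, uptr);      // no further checking
 *		err |= __get_user(b, uptr + 1);
 *	}
 *
 * "uptr" is assumed to be an "unsigned int __user *" owned by the caller.
 */
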
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern long __put_user_bad(void);

#define __put_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "move.b");               \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "move.w");               \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "move.d");               \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_64(x, ptr, retval);                      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)
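
/*
 * Illustrative note: "size" is always the compile-time constant
 * sizeof(*(ptr)), so only one arm of the switch above survives in the
 * generated code.  For example (hypothetical caller), put_user(val, sptr)
 * with a "u16 __user *sptr" reduces to the single "move.w" case.
 */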

#define __get_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "move.b");               \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "move.w");               \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "move.d");               \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_64(x, ptr, retval);                      \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __put_user_nocheck(x, ptr, size)                \
({                                                      \
        long __pu_err;                                  \
        __put_user_size((x), (ptr), (size), __pu_err);  \
        __pu_err;                                       \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) *__pu_addr = (ptr);                          \
        if (access_ok(VERIFY_WRITE, __pu_addr, size))                   \
                __put_user_size((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                                       \
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))


#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __gu_err, __gu_val;                                \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__force __typeof__(*(ptr)))__gu_val;             \
        __gu_err;                                               \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        long __gu_err = -EFAULT, __gu_val = 0;                          \
        const __typeof__(*(ptr)) *__gu_addr = (ptr);                    \
        if (access_ok(VERIFY_READ, __gu_addr, size))                    \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
})

extern long __get_user_bad(void);

/* More complex functions.  Most are inline, but some call functions that
   live in lib/usercopy.c  */

extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __copy_user_in(void *to, const void __user *from, unsigned long n);
extern unsigned long __do_clear_user(void __user *to, unsigned long n);
static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
        long res = -EFAULT;

        if (access_ok(VERIFY_READ, src, 1))
                res = __do_strncpy_from_user(dst, src, count);
        return res;
}
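
/*
 * Illustrative caller (hypothetical, not from this file): copying a
 * NUL-terminated string from user space into a fixed kernel buffer,
 * with a negative return value indicating a fault:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *
 * "uname" is assumed to be a "const char __user *" supplied by the caller.
 */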


/* Note that these expand awfully if made into switch constructs, so
   don't do that.  */

static inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long ret = 0;

        if (n == 0)
                ;
        else if (n == 1)
                __asm_copy_from_user_1(to, from, ret);
        else if (n == 2)
                __asm_copy_from_user_2(to, from, ret);
        else if (n == 3)
                __asm_copy_from_user_3(to, from, ret);
        else if (n == 4)
                __asm_copy_from_user_4(to, from, ret);
        else if (n == 5)
                __asm_copy_from_user_5(to, from, ret);
        else if (n == 6)
                __asm_copy_from_user_6(to, from, ret);
        else if (n == 7)
                __asm_copy_from_user_7(to, from, ret);
        else if (n == 8)
                __asm_copy_from_user_8(to, from, ret);
        else if (n == 9)
                __asm_copy_from_user_9(to, from, ret);
        else if (n == 10)
                __asm_copy_from_user_10(to, from, ret);
        else if (n == 11)
                __asm_copy_from_user_11(to, from, ret);
        else if (n == 12)
                __asm_copy_from_user_12(to, from, ret);
        else if (n == 13)
                __asm_copy_from_user_13(to, from, ret);
        else if (n == 14)
                __asm_copy_from_user_14(to, from, ret);
        else if (n == 15)
                __asm_copy_from_user_15(to, from, ret);
        else if (n == 16)
                __asm_copy_from_user_16(to, from, ret);
        else if (n == 20)
                __asm_copy_from_user_20(to, from, ret);
        else if (n == 24)
                __asm_copy_from_user_24(to, from, ret);
        else
                ret = __copy_user_in(to, from, n);

        return ret;
}

/* Ditto, don't make a switch out of this.  */

static inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        unsigned long ret = 0;

        if (n == 0)
                ;
        else if (n == 1)
                __asm_copy_to_user_1(to, from, ret);
        else if (n == 2)
                __asm_copy_to_user_2(to, from, ret);
        else if (n == 3)
                __asm_copy_to_user_3(to, from, ret);
        else if (n == 4)
                __asm_copy_to_user_4(to, from, ret);
        else if (n == 5)
                __asm_copy_to_user_5(to, from, ret);
        else if (n == 6)
                __asm_copy_to_user_6(to, from, ret);
        else if (n == 7)
                __asm_copy_to_user_7(to, from, ret);
        else if (n == 8)
                __asm_copy_to_user_8(to, from, ret);
        else if (n == 9)
                __asm_copy_to_user_9(to, from, ret);
        else if (n == 10)
                __asm_copy_to_user_10(to, from, ret);
        else if (n == 11)
                __asm_copy_to_user_11(to, from, ret);
        else if (n == 12)
                __asm_copy_to_user_12(to, from, ret);
        else if (n == 13)
                __asm_copy_to_user_13(to, from, ret);
        else if (n == 14)
                __asm_copy_to_user_14(to, from, ret);
        else if (n == 15)
                __asm_copy_to_user_15(to, from, ret);
        else if (n == 16)
                __asm_copy_to_user_16(to, from, ret);
        else if (n == 20)
                __asm_copy_to_user_20(to, from, ret);
        else if (n == 24)
                __asm_copy_to_user_24(to, from, ret);
        else
                ret = __copy_user(to, from, n);

        return ret;
}

/* No switch, please.  */

static inline unsigned long
__constant_clear_user(void __user *to, unsigned long n)
{
        unsigned long ret = 0;

        if (n == 0)
                ;
        else if (n == 1)
                __asm_clear_1(to, ret);
        else if (n == 2)
                __asm_clear_2(to, ret);
        else if (n == 3)
                __asm_clear_3(to, ret);
        else if (n == 4)
                __asm_clear_4(to, ret);
        else if (n == 8)
                __asm_clear_8(to, ret);
        else if (n == 12)
                __asm_clear_12(to, ret);
        else if (n == 16)
                __asm_clear_16(to, ret);
        else if (n == 20)
                __asm_clear_20(to, ret);
        else if (n == 24)
                __asm_clear_24(to, ret);
        else
                ret = __do_clear_user(to, n);

        return ret;
}


static inline size_t clear_user(void __user *to, size_t n)
{
        if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
                return n;
        if (__builtin_constant_p(n))
                return __constant_clear_user(to, n);
        else
                return __do_clear_user(to, n);
}
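
/*
 * Illustrative use (hypothetical caller): clearing a fixed-size user
 * buffer lets __builtin_constant_p(n) pick the unrolled constant path:
 *
 *	if (clear_user(ubuf, sizeof(struct some_user_struct)))
 *		return -EFAULT;
 *
 * "ubuf" and "struct some_user_struct" are placeholders; clear_user()
 * returns the number of bytes that could not be cleared.
 */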

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (__builtin_constant_p(n))
                return __constant_copy_from_user(to, from, n);
        else
                return __copy_user_in(to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (__builtin_constant_p(n))
                return __constant_copy_to_user(to, from, n);
        else
                return __copy_user(to, from, n);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
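
/*
 * With INLINE_COPY_FROM_USER / INLINE_COPY_TO_USER defined, the generic
 * <linux/uaccess.h> provides copy_from_user()/copy_to_user() as inline
 * wrappers around the raw_copy_*() routines above.  A typical caller
 * (illustrative, not from this file):
 *
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *
 * "kbuf" and "ubuf" are placeholders for a kernel object and a
 * user-space pointer.
 */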

static inline unsigned long
__clear_user(void __user *to, unsigned long n)
{
        return __do_clear_user(to, n);
}

#endif  /* _CRIS_UACCESS_H */