linux/arch/cris/include/asm/uaccess.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Authors:    Bjorn Wesen (bjornw@axis.com)
 *             Hans-Peter Nilsson (hp@axis.com)
 */

/* Asms have been tweaked (within the domain of correctness) to give
   satisfactory results for "gcc version 2.96 20000427 (experimental)".

   Check regularly...

   Register $r9 is chosen for temporaries, being a call-clobbered register
   first in line to be used (notably for local blocks), not colliding with
   parameter registers.  */

#ifndef _CRIS_UACCESS_H
#define _CRIS_UACCESS_H

#include <asm/processor.h>
#include <asm/page.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

/* addr_limit is the maximum accessible address for the task.  We misuse
 * the KERNEL_DS and USER_DS values to both assign and compare the
 * addr_limit values through the equally misnamed get/set_fs macros.
 * (see above)
 */

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

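/* Illustrative sketch (not part of this header): the classic pattern by which
 * kernel code of this era temporarily widened the address limit so that the
 * checking macros below accept kernel pointers.  Only get_fs()/set_fs() and
 * KERNEL_DS come from this file; read_file_kernel() is a hypothetical helper.
 *
 *      static ssize_t read_file_kernel(struct file *f, void *buf, size_t len)
 *      {
 *              mm_segment_t old_fs = get_fs();         // save current limit
 *              ssize_t ret;
 *
 *              set_fs(KERNEL_DS);                      // allow kernel addresses
 *              ret = vfs_read(f, (__force char __user *)buf, len, &f->f_pos);
 *              set_fs(old_fs);                         // always restore
 *              return ret;
 *      }
 */
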
#define segment_eq(a, b)        ((a).seg == (b).seg)

#define __kernel_ok (uaccess_kernel())
#define __user_ok(addr, size) \
        (((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))

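/* Illustrative sketch (assumption, not from this file): a caller checks a
 * whole user range once with access_ok() and may then use the unchecked
 * __get_user()/__put_user() variants defined below on addresses inside that
 * range.  The type argument (VERIFY_READ here) is ignored by this
 * implementation; sum_two_words() is a hypothetical helper.
 *
 *      int sum_two_words(const u32 __user *uptr, u32 *out)
 *      {
 *              u32 a, b;
 *
 *              if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
 *                      return -EFAULT;
 *              if (__get_user(a, uptr) || __get_user(b, uptr + 1))
 *                      return -EFAULT;
 *              *out = a + b;
 *              return 0;
 *      }
 */
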
#include <arch/uaccess.h>
#include <asm/extable.h>

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to pass the result back through a pointer, because
 * that is too much of a performance impact.  Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * CRIS, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

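/* Illustrative sketch (assumption, not from this file): typical use of the
 * checking variants in an ioctl-style handler.  get_user() and put_user()
 * return 0 on success and -EFAULT on a faulting user access; the struct and
 * function names are hypothetical.
 *
 *      struct knob { int value; };
 *
 *      long knob_ioctl_get(struct knob *k, int __user *argp)
 *      {
 *              return put_user(k->value, argp);        // copy one int out
 *      }
 *
 *      long knob_ioctl_set(struct knob *k, const int __user *argp)
 *      {
 *              int v;
 *
 *              if (get_user(v, argp))                  // copy one int in
 *                      return -EFAULT;
 *              k->value = v;
 *              return 0;
 *      }
 */
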
extern long __put_user_bad(void);

#define __put_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "move.b");               \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "move.w");               \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "move.d");               \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_64(x, ptr, retval);                      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)
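
/* For example, put_user((u16)0x1234, uptr) on a u16 __user *uptr arrives here
 * with size == 2, so the store is emitted as a single "move.w" through
 * __put_user_asm() (provided by <arch/uaccess.h>); a u64 store goes through
 * __put_user_asm_64().  Any other size calls __put_user_bad(), which has no
 * definition and therefore fails at link time.
 */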

#define __get_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "move.b");               \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "move.w");               \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "move.d");               \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_64(x, ptr, retval);                      \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __put_user_nocheck(x, ptr, size)                \
({                                                      \
        long __pu_err;                                  \
        __put_user_size((x), (ptr), (size), __pu_err);  \
        __pu_err;                                       \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) *__pu_addr = (ptr);                          \
        if (access_ok(VERIFY_WRITE, __pu_addr, size))                   \
                __put_user_size((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                                       \
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __gu_err, __gu_val;                                \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__force __typeof__(*(ptr)))__gu_val;             \
        __gu_err;                                               \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        long __gu_err = -EFAULT, __gu_val = 0;                          \
        const __typeof__(*(ptr)) *__gu_addr = (ptr);                    \
        if (access_ok(VERIFY_READ, __gu_addr, size))                    \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
})

extern long __get_user_bad(void);

/* More complex functions.  Most are inline, but some call functions that
   live in lib/usercopy.c  */

extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __copy_user_in(void *to, const void __user *from, unsigned long n);
extern unsigned long __do_clear_user(void __user *to, unsigned long n);

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
        long res = -EFAULT;

        if (access_ok(VERIFY_READ, src, 1))
                res = __do_strncpy_from_user(dst, src, count);
        return res;
}
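
/* Illustrative sketch (assumption, not from this file): by the usual kernel
 * contract, strncpy_from_user() returns the length of the copied string
 * (excluding the trailing NUL), count if the string had to be truncated, or
 * -EFAULT on a bad user pointer.  uname is a hypothetical
 * const char __user * argument.
 *
 *      char name[32];
 *      long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *      if (len < 0)
 *              return len;                     // -EFAULT
 *      if (len == sizeof(name))
 *              return -ENAMETOOLONG;           // no NUL fit in the buffer
 *      // name[] now holds a NUL-terminated copy of the user string
 */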

/* Note that these expand awfully if made into switch constructs, so
   don't do that.  */

static inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long ret = 0;

        if (n == 0)
                ;
        else if (n == 1)
                __asm_copy_from_user_1(to, from, ret);
        else if (n == 2)
                __asm_copy_from_user_2(to, from, ret);
        else if (n == 3)
                __asm_copy_from_user_3(to, from, ret);
        else if (n == 4)
                __asm_copy_from_user_4(to, from, ret);
        else if (n == 5)
                __asm_copy_from_user_5(to, from, ret);
        else if (n == 6)
                __asm_copy_from_user_6(to, from, ret);
        else if (n == 7)
                __asm_copy_from_user_7(to, from, ret);
        else if (n == 8)
                __asm_copy_from_user_8(to, from, ret);
        else if (n == 9)
                __asm_copy_from_user_9(to, from, ret);
        else if (n == 10)
                __asm_copy_from_user_10(to, from, ret);
        else if (n == 11)
                __asm_copy_from_user_11(to, from, ret);
        else if (n == 12)
                __asm_copy_from_user_12(to, from, ret);
        else if (n == 13)
                __asm_copy_from_user_13(to, from, ret);
        else if (n == 14)
                __asm_copy_from_user_14(to, from, ret);
        else if (n == 15)
                __asm_copy_from_user_15(to, from, ret);
        else if (n == 16)
                __asm_copy_from_user_16(to, from, ret);
        else if (n == 20)
                __asm_copy_from_user_20(to, from, ret);
        else if (n == 24)
                __asm_copy_from_user_24(to, from, ret);
        else
                ret = __copy_user_in(to, from, n);

        return ret;
}
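
/* For example, a caller doing
 *
 *      struct pair { u32 a, b; } p;
 *
 *      if (copy_from_user(&p, upair, sizeof(p)))       // sizeof(p) == 8
 *              return -EFAULT;
 *
 * reaches raw_copy_from_user() below with a compile-time-constant n == 8, so
 * the copy is the unrolled __asm_copy_from_user_8() from <arch/uaccess.h>
 * rather than the generic __copy_user_in() loop.  (upair is a hypothetical
 * struct pair __user * argument; struct pair is invented for illustration.)
 */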

/* Ditto, don't make a switch out of this.  */

static inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        unsigned long ret = 0;

        if (n == 0)
                ;
        else if (n == 1)
                __asm_copy_to_user_1(to, from, ret);
        else if (n == 2)
                __asm_copy_to_user_2(to, from, ret);
        else if (n == 3)
                __asm_copy_to_user_3(to, from, ret);
        else if (n == 4)
                __asm_copy_to_user_4(to, from, ret);
        else if (n == 5)
                __asm_copy_to_user_5(to, from, ret);
        else if (n == 6)
                __asm_copy_to_user_6(to, from, ret);
        else if (n == 7)
                __asm_copy_to_user_7(to, from, ret);
        else if (n == 8)
                __asm_copy_to_user_8(to, from, ret);
        else if (n == 9)
                __asm_copy_to_user_9(to, from, ret);
        else if (n == 10)
                __asm_copy_to_user_10(to, from, ret);
        else if (n == 11)
                __asm_copy_to_user_11(to, from, ret);
        else if (n == 12)
                __asm_copy_to_user_12(to, from, ret);
        else if (n == 13)
                __asm_copy_to_user_13(to, from, ret);
        else if (n == 14)
                __asm_copy_to_user_14(to, from, ret);
        else if (n == 15)
                __asm_copy_to_user_15(to, from, ret);
        else if (n == 16)
                __asm_copy_to_user_16(to, from, ret);
        else if (n == 20)
                __asm_copy_to_user_20(to, from, ret);
        else if (n == 24)
                __asm_copy_to_user_24(to, from, ret);
        else
                ret = __copy_user(to, from, n);

        return ret;
}

/* No switch, please.  */

static inline unsigned long
__constant_clear_user(void __user *to, unsigned long n)
{
        unsigned long ret = 0;

        if (n == 0)
                ;
        else if (n == 1)
                __asm_clear_1(to, ret);
        else if (n == 2)
                __asm_clear_2(to, ret);
        else if (n == 3)
                __asm_clear_3(to, ret);
        else if (n == 4)
                __asm_clear_4(to, ret);
        else if (n == 8)
                __asm_clear_8(to, ret);
        else if (n == 12)
                __asm_clear_12(to, ret);
        else if (n == 16)
                __asm_clear_16(to, ret);
        else if (n == 20)
                __asm_clear_20(to, ret);
        else if (n == 24)
                __asm_clear_24(to, ret);
        else
                ret = __do_clear_user(to, n);

        return ret;
}

static inline size_t clear_user(void __user *to, size_t n)
{
        if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
                return n;
        if (__builtin_constant_p(n))
                return __constant_clear_user(to, n);
        else
                return __do_clear_user(to, n);
}
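
/* Illustrative sketch (assumption, not from this file): clear_user() zeroes
 * n bytes of user memory and returns the number of bytes that could NOT be
 * cleared, so 0 means success.  ubuf and len are hypothetical.
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;         // part of the range was unwritable
 */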

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (__builtin_constant_p(n))
                return __constant_copy_from_user(to, from, n);
        else
                return __copy_user_in(to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (__builtin_constant_p(n))
                return __constant_copy_to_user(to, from, n);
        else
                return __copy_user(to, from, n);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
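
/* Illustrative sketch (assumption, not from this file): drivers do not call
 * the raw_copy_*() helpers directly; they use copy_from_user()/copy_to_user()
 * from <linux/uaccess.h>, which perform the access_ok() check and then invoke
 * the raw routines above.  INLINE_COPY_{FROM,TO}_USER asks the generic header
 * to emit those wrappers as inline functions.  The names below are
 * hypothetical.
 *
 *      struct req { u32 cmd; u32 arg; };
 *
 *      long handle_req(struct req __user *ureq)
 *      {
 *              struct req r;
 *
 *              if (copy_from_user(&r, ureq, sizeof(r)))
 *                      return -EFAULT;
 *              r.arg += 1;
 *              if (copy_to_user(ureq, &r, sizeof(r)))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */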

static inline unsigned long
__clear_user(void __user *to, unsigned long n)
{
        return __do_clear_user(to, n);
}

#endif  /* _CRIS_UACCESS_H */