linux/arch/mn10300/include/asm/uaccess.h
/* MN10300 userspace access functions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/errno.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_XDS      MAKE_MM_SEG(0xBFFFFFFF)
#define KERNEL_DS       MAKE_MM_SEG(0x9FFFFFFF)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))
#define __kernel_ds_p() (current_thread_info()->addr_limit.seg == 0x9FFFFFFF)
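
/* Illustrative sketch (not part of this header's API): the usual pattern
 * for temporarily widening the address limit so kernel buffers can be
 * passed to routines that expect user pointers; the old limit must always
 * be restored afterwards:
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      ...access a kernel address through the user accessors...
 *      set_fs(old_fs);
 */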

#define segment_eq(a, b) ((a).seg == (b).seg)

#define __addr_ok(addr) \
        ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * check that a range of addresses falls within the current address limit
 */
static inline int ___range_ok(unsigned long addr, unsigned int size)
{
        int flag = 1, tmp;

        asm("   add     %3,%1   \n"     /* set C-flag if addr + size > 4Gb */
            "   bcs     0f      \n"
            "   cmp     %4,%1   \n"     /* jump if addr+size>limit (error) */
            "   bhi     0f      \n"
            "   clr     %0      \n"     /* mark okay */
            "0:                 \n"
            : "=r"(flag), "=&r"(tmp)
            : "1"(addr), "ir"(size),
              "r"(current_thread_info()->addr_limit.seg), "0"(flag)
            : "cc"
            );

        return flag;
}
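
/* For reference, a plain-C sketch of the check above (an illustration, not
 * a drop-in replacement for the asm):
 *
 *      unsigned long end = addr + size;
 *
 *      if (end < addr)         (the add carried: wrapped past 4GB)
 *              return 1;
 *      if (end > current_thread_info()->addr_limit.seg)
 *              return 1;       (range extends past the current limit)
 *      return 0;               (range is okay)
 */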

#define __range_ok(addr, size) ___range_ok((unsigned long)(addr), (u32)(size))

#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size)     (__range_ok((addr), (size)) == 0)

static inline int verify_area(int type, const void *addr, unsigned long size)
{
        return access_ok(type, addr, size) ? 0 : -EFAULT;
}


/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

/* Returns 0 if no fixup was found for the faulting instruction, non-zero
 * otherwise. */
extern int fixup_exception(struct pt_regs *regs);
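
/* Illustrative sketch of what fixup_exception() does (the real version
 * lives in the arch's mm code): look up the faulting PC in the exception
 * table and, if an entry exists, resume at the recorded fixup address.
 *
 *      const struct exception_table_entry *fix;
 *
 *      fix = search_exception_tables(regs->pc);
 *      if (fix) {
 *              regs->pc = fix->fixup;
 *              return 1;
 *      }
 *      return 0;
 */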

#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
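
/* Example usage (illustrative; uptr and val are hypothetical):
 *
 *      int __user *uptr;
 *      int val;
 *
 *      if (get_user(val, uptr))
 *              return -EFAULT;
 *      if (put_user(val + 1, uptr))
 *              return -EFAULT;
 */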

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
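
/* Example: one explicit access_ok() check covering several accesses
 * (illustrative; uarr, a and b are hypothetical):
 *
 *      if (!access_ok(VERIFY_READ, uarr, 2 * sizeof(int)))
 *              return -EFAULT;
 *      __get_user(a, uarr);
 *      __get_user(b, uarr + 1);
 */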

/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens.  These macros can be optimized for
 * the case of just returning from the function in which xxx_ret is used.
 */

#define put_user_ret(x, ptr, ret) \
        ({ if (put_user((x), (ptr)))    return (ret); })
#define get_user_ret(x, ptr, ret) \
        ({ if (get_user((x), (ptr)))    return (ret); })
#define __put_user_ret(x, ptr, ret) \
        ({ if (__put_user((x), (ptr)))  return (ret); })
#define __get_user_ret(x, ptr, ret) \
        ({ if (__get_user((x), (ptr)))  return (ret); })
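
/* Because these expand to a bare "return", they may only be used inside a
 * function whose return type accepts "ret".  Illustrative use:
 *
 *      put_user_ret(val, uptr, -EFAULT);
 *      (expands to: if (put_user(val, uptr)) return -EFAULT;)
 */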

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
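/* Casting the address to a pointer to __large_struct makes the "m"
 * constraints in the asm below cover a wide region rather than a single
 * word, so gcc cannot assume only sizeof(long) bytes are accessed. */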

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        unsigned long __gu_addr;                                        \
        int __gu_err;                                                   \
        __gu_addr = (unsigned long) (ptr);                              \
        switch (size) {                                                 \
        case 1: {                                                       \
                unsigned char __gu_val;                                 \
                __get_user_asm("bu");                                   \
                (x) = *(__force __typeof__(*(ptr))*) &__gu_val;         \
                break;                                                  \
        }                                                               \
        case 2: {                                                       \
                unsigned short __gu_val;                                \
                __get_user_asm("hu");                                   \
                (x) = *(__force __typeof__(*(ptr))*) &__gu_val;         \
                break;                                                  \
        }                                                               \
        case 4: {                                                       \
                unsigned int __gu_val;                                  \
                __get_user_asm("");                                     \
                (x) = *(__force __typeof__(*(ptr))*) &__gu_val;         \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __get_user_unknown();                                   \
                break;                                                  \
        }                                                               \
        __gu_err;                                                       \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        int _e;                                                         \
        if (likely(__access_ok((unsigned long) (ptr), (size))))         \
                _e = __get_user_nocheck((x), (ptr), (size));            \
        else {                                                          \
                _e = -EFAULT;                                           \
                (x) = (__typeof__(x))0;                                 \
        }                                                               \
        _e;                                                             \
})

#define __get_user_asm(INSN)                                    \
({                                                              \
        asm volatile(                                           \
                "1:\n"                                          \
                "       mov"INSN"       %2,%1\n"                \
                "       mov             0,%0\n"                 \
                "2:\n"                                          \
                "       .section        .fixup,\"ax\"\n"        \
                "3:\n\t"                                        \
                "       mov             %3,%0\n"                \
                "       jmp             2b\n"                   \
                "       .previous\n"                            \
                "       .section        __ex_table,\"a\"\n"     \
                "       .balign         4\n"                    \
                "       .long           1b, 3b\n"               \
                "       .previous"                              \
                : "=&r" (__gu_err), "=&r" (__gu_val)            \
                : "m" (__m(__gu_addr)), "i" (-EFAULT));         \
})

extern int __get_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        union {                                                 \
                __typeof__(*(ptr)) val;                         \
                u32 bits[2];                                    \
        } __pu_val;                                             \
        unsigned long __pu_addr;                                \
        int __pu_err;                                           \
        __pu_val.val = (x);                                     \
        __pu_addr = (unsigned long) (ptr);                      \
        switch (size) {                                         \
        case 1:  __put_user_asm("bu"); break;                   \
        case 2:  __put_user_asm("hu"); break;                   \
        case 4:  __put_user_asm(""  ); break;                   \
        case 8:  __put_user_asm8();    break;                   \
        default: __pu_err = __put_user_unknown(); break;        \
        }                                                       \
        __pu_err;                                               \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        union {                                                         \
                __typeof__(*(ptr)) val;                                 \
                u32 bits[2];                                            \
        } __pu_val;                                                     \
        unsigned long __pu_addr;                                        \
        int __pu_err;                                                   \
        __pu_val.val = (x);                                             \
        __pu_addr = (unsigned long) (ptr);                              \
        if (likely(__access_ok(__pu_addr, size))) {                     \
                switch (size) {                                         \
                case 1:  __put_user_asm("bu"); break;                   \
                case 2:  __put_user_asm("hu"); break;                   \
                case 4:  __put_user_asm(""  ); break;                   \
                case 8:  __put_user_asm8();    break;                   \
                default: __pu_err = __put_user_unknown(); break;        \
                }                                                       \
        }                                                               \
        else {                                                          \
                __pu_err = -EFAULT;                                     \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_asm(INSN)                                    \
({                                                              \
        asm volatile(                                           \
                "1:\n"                                          \
                "       mov"INSN"       %1,%2\n"                \
                "       mov             0,%0\n"                 \
                "2:\n"                                          \
                "       .section        .fixup,\"ax\"\n"        \
                "3:\n"                                          \
                "       mov             %3,%0\n"                \
                "       jmp             2b\n"                   \
                "       .previous\n"                            \
                "       .section        __ex_table,\"a\"\n"     \
                "       .balign         4\n"                    \
                "       .long           1b, 3b\n"               \
                "       .previous"                              \
                : "=&r" (__pu_err)                              \
                : "r" (__pu_val.val), "m" (__m(__pu_addr)),     \
                  "i" (-EFAULT)                                 \
                );                                              \
})

#define __put_user_asm8()                                               \
({                                                                      \
        asm volatile(                                                   \
                "1:     mov             %1,%3           \n"             \
                "2:     mov             %2,%4           \n"             \
                "       mov             0,%0            \n"             \
                "3:                                     \n"             \
                "       .section        .fixup,\"ax\"   \n"             \
                "4:                                     \n"             \
                "       mov             %5,%0           \n"             \
                "       jmp             3b              \n"             \
                "       .previous                       \n"             \
                "       .section        __ex_table,\"a\"\n"             \
                "       .balign         4               \n"             \
                "       .long           1b, 4b          \n"             \
                "       .long           2b, 4b          \n"             \
                "       .previous                       \n"             \
                : "=&r" (__pu_err)                                      \
                : "r" (__pu_val.bits[0]), "r" (__pu_val.bits[1]),       \
                  "m" (__m(__pu_addr)), "m" (__m(__pu_addr+4)),         \
                  "i" (-EFAULT)                                         \
                );                                                      \
})
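
/* The 8-byte case above stores the value as two 32-bit halves; each store
 * has its own exception-table entry, so a fault on either half lands in
 * the common fixup at 4: and yields -EFAULT. */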

extern int __put_user_unknown(void);


/*
 * Copy To/From Userspace
 */
/* Generic arbitrary sized copy.  */
#define __copy_user(to, from, size)                                     \
do {                                                                    \
        if (size) {                                                     \
                void *__to = to;                                        \
                const void *__from = from;                              \
                int w;                                                  \
                asm volatile(                                           \
                        "0:     movbu   (%0),%3;\n"                     \
                        "1:     movbu   %3,(%1);\n"                     \
                        "       inc     %0;\n"                          \
                        "       inc     %1;\n"                          \
                        "       add     -1,%2;\n"                       \
                        "       bne     0b;\n"                          \
                        "2:\n"                                          \
                        "       .section .fixup,\"ax\"\n"               \
                        "3:     jmp     2b\n"                           \
                        "       .previous\n"                            \
                        "       .section __ex_table,\"a\"\n"            \
                        "       .balign 4\n"                            \
                        "       .long   0b,3b\n"                        \
                        "       .long   1b,3b\n"                        \
                        "       .previous\n"                            \
                        : "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
                        : "0"(__from), "1"(__to), "2"(size)             \
                        : "memory");                                    \
        }                                                               \
} while (0)

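/* As __copy_user(), but if the copy faults part-way through, the fixup
 * code zero-fills the rest of the destination buffer before resuming at
 * 2: with the residual count intact; these are the semantics that
 * copy_from_user() relies on. */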
#define __copy_user_zeroing(to, from, size)                             \
do {                                                                    \
        if (size) {                                                     \
                void *__to = to;                                        \
                const void *__from = from;                              \
                int w;                                                  \
                asm volatile(                                           \
                        "0:     movbu   (%0),%3;\n"                     \
                        "1:     movbu   %3,(%1);\n"                     \
                        "       inc     %0;\n"                          \
                        "       inc     %1;\n"                          \
                        "       add     -1,%2;\n"                       \
                        "       bne     0b;\n"                          \
                        "2:\n"                                          \
                        "       .section .fixup,\"ax\"\n"               \
                        "3:\n"                                          \
                        "       mov     %2,%0\n"                        \
                        "       clr     %3\n"                           \
                        "4:     movbu   %3,(%1);\n"                     \
                        "       inc     %1;\n"                          \
                        "       add     -1,%2;\n"                       \
                        "       bne     4b;\n"                          \
                        "       mov     %0,%2\n"                        \
                        "       jmp     2b\n"                           \
                        "       .previous\n"                            \
                        "       .section __ex_table,\"a\"\n"            \
                        "       .balign 4\n"                            \
                        "       .long   0b,3b\n"                        \
                        "       .long   1b,3b\n"                        \
                        "       .previous\n"                            \
                        : "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
                        : "0"(__from), "1"(__to), "2"(size)             \
                        : "memory");                                    \
        }                                                               \
} while (0)

/* We let the __ versions of copy_from/to_user be inlined, because they're
 * often used in fast paths and have only a small space overhead.
 */
static inline
unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
                                               unsigned long n)
{
        __copy_user_zeroing(to, from, n);
        return n;
}

static inline
unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
                                             unsigned long n)
{
        __copy_user(to, from, n);
        return n;
}
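
/* Both helpers return the number of bytes that could NOT be copied, so 0
 * indicates complete success, matching the usual copy_*_user convention. */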


#if 0
#error don't use - these macros don't increment to & from pointers
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size)    \
do {                                            \
        asm volatile(                           \
                "       mov %0,a0;\n"           \
                "0:     movbu (%1),d3;\n"       \
                "1:     movbu d3,(%2);\n"       \
                "       add -1,a0;\n"           \
                "       bne 0b;\n"              \
                "2:;"                           \
                ".section .fixup,\"ax\"\n"      \
                "3:     jmp 2b\n"               \
                ".previous\n"                   \
                ".section __ex_table,\"a\"\n"   \
                "       .balign 4\n"            \
                "       .long 0b,3b\n"          \
                "       .long 1b,3b\n"          \
                ".previous"                     \
                :                               \
                : "d"(size), "d"(to), "d"(from) \
                : "d3", "a0");                  \
} while (0)

/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size)    \
do {                                                    \
        asm volatile(                                   \
                "       mov %0,a0;\n"                   \
                "0:     movbu (%1),d3;\n"               \
                "1:     movbu d3,(%2);\n"               \
                "       add -1,a0;\n"                   \
                "       bne 0b;\n"                      \
                "2:;"                                   \
                ".section .fixup,\"ax\"\n"              \
                "3:     jmp 2b\n"                       \
                ".previous\n"                           \
                ".section __ex_table,\"a\"\n"           \
                "       .balign 4\n"                    \
                "       .long 0b,3b\n"                  \
                "       .long 1b,3b\n"                  \
                ".previous"                             \
                :                                       \
                : "d"(size), "d"(to), "d"(from)         \
                : "d3", "a0");                          \
} while (0)

static inline
unsigned long __constant_copy_to_user(void *to, const void *from,
                                      unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                __constant_copy_user(to, from, n);
        return n;
}

static inline
unsigned long __constant_copy_from_user(void *to, const void *from,
                                        unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n))
                __constant_copy_user_zeroing(to, from, n);
        return n;
}

static inline
unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
                                              unsigned long n)
{
        __constant_copy_user(to, from, n);
        return n;
}

static inline
unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
                                                unsigned long n)
{
        __constant_copy_user_zeroing(to, from, n);
        return n;
}
#endif

extern unsigned long __generic_copy_to_user(void __user *, const void *,
                                            unsigned long);
extern unsigned long __generic_copy_from_user(void *, const void __user *,
                                              unsigned long);

#define __copy_to_user_inatomic(to, from, n) \
        __generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
        __generic_copy_from_user_nocheck((to), (from), (n))
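
/* The _inatomic variants do no might_sleep() check and so may be used in
 * contexts where page faults are disabled; callers are responsible for
 * validating the range themselves. */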

#define __copy_to_user(to, from, n)                     \
({                                                      \
        might_sleep();                                  \
        __copy_to_user_inatomic((to), (from), (n));     \
})

#define __copy_from_user(to, from, n)                   \
({                                                      \
        might_sleep();                                  \
        __copy_from_user_inatomic((to), (from), (n));   \
})


#define copy_to_user(to, from, n)   __generic_copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
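
/* Typical copy-in pattern (illustrative; kbuf and uptr are hypothetical):
 *
 *      struct foo kbuf;
 *
 *      if (copy_from_user(&kbuf, uptr, sizeof(kbuf)))
 *              return -EFAULT;
 */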

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *str, long n);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
extern unsigned long clear_user(void __user *mem, unsigned long len);
extern unsigned long __clear_user(void __user *mem, unsigned long len);
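
/* Illustrative example: strncpy_from_user() returns the length of the
 * copied string on success, or -EFAULT on an unhandled fault:
 *
 *      char name[32];
 *      long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *      if (len < 0)
 *              return len;
 */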

#endif /* _ASM_UACCESS_H */