linux/arch/blackfin/include/asm/uaccess.h
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 *
 * Based on: include/asm-m68knommu/uaccess.h
 */

#ifndef __BLACKFIN_UACCESS_H
#define __BLACKFIN_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>

#include <asm/segment.h>
#include <asm/sections.h>

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
        current_thread_info()->addr_limit = fs;
}

#define segment_eq(a, b) ((a) == (b))

#define VERIFY_READ     0
#define VERIFY_WRITE    1

#define access_ok(type, addr, size) _access_ok((unsigned long)(addr), (size))

static inline int is_in_rom(unsigned long addr)
{
        /*
         * What we are really trying to do is determine if addr is
         * in an allocated kernel memory region. If not then assume
         * we cannot free it or otherwise de-allocate it. Ideally
         * we could restrict this to really being in a ROM or flash,
         * but that would need to be done on a board by board basis,
         * not globally.
         */
        if ((addr < _ramstart) || (addr >= _ramend))
                return (1);

        /* Default case, not in ROM */
        return (0);
}

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 */

#ifndef CONFIG_ACCESS_CHECK
static inline int _access_ok(unsigned long addr, unsigned long size) { return 1; }
#else
extern int _access_ok(unsigned long addr, unsigned long size);
#endif
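
/*
 * Usage sketch: a caller that wants to validate a user pointer before
 * handing it to the raw copy helpers can gate the access with access_ok().
 * The function and variable names below are hypothetical, not part of any
 * kernel API.
 *
 *      static int demo_check_range(const void __user *ubuf, size_t len)
 *      {
 *              if (!access_ok(VERIFY_READ, ubuf, len))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */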

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means that when everything goes
 * well, we don't even have to jump over them.  Further, they do not
 * intrude on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned long insn, fixup;
};

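/*
 * Rough sketch of how a fault handler generally consults such a table:
 * search_exception_tables() is the generic kernel lookup helper, and the
 * pt_regs program-counter field is assumed to be named pc here.
 *
 *      const struct exception_table_entry *e;
 *
 *      e = search_exception_tables(regs->pc);
 *      if (e) {
 *              regs->pc = e->fixup;
 *              return;
 *      }
 */
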
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */

#define put_user(x, p)                                          \
        ({                                                      \
                int _err = 0;                                   \
                typeof(*(p)) _x = (x);                          \
                typeof(*(p)) *_p = (p);                         \
                if (!access_ok(VERIFY_WRITE, _p, sizeof(*(_p)))) {\
                        _err = -EFAULT;                         \
                } else {                                        \
                switch (sizeof(*(_p))) {                        \
                case 1:                                         \
                        __put_user_asm(_x, _p, B);              \
                        break;                                  \
                case 2:                                         \
                        __put_user_asm(_x, _p, W);              \
                        break;                                  \
                case 4:                                         \
                        __put_user_asm(_x, _p,  );              \
                        break;                                  \
                case 8: {                                       \
                        long _xl, _xh;                          \
                        _xl = ((long *)&_x)[0];                 \
                        _xh = ((long *)&_x)[1];                 \
                        __put_user_asm(_xl, ((long *)_p)+0, );  \
                        __put_user_asm(_xh, ((long *)_p)+1, );  \
                } break;                                        \
                default:                                        \
                        _err = __put_user_bad();                \
                        break;                                  \
                }                                               \
                }                                               \
                _err;                                           \
        })

#define __put_user(x, p) put_user(x, p)

static inline int bad_user_access_length(void)
{
        panic("bad_user_access_length");
        return -1;
}

#define __put_user_bad() (printk(KERN_INFO "put_user_bad %s:%d %s\n",  \
                           __FILE__, __LINE__, __func__),              \
                           bad_user_access_length(), (-EFAULT))
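
/*
 * Usage sketch: returning a single value to userspace with put_user(),
 * which evaluates to 0 on success and -EFAULT if the destination fails
 * access_ok().  The handler name and the BFIN_DEMO_GET_VALUE command are
 * hypothetical.
 *
 *      static long demo_ioctl(struct file *file, unsigned int cmd,
 *                             unsigned long arg)
 *      {
 *              int __user *uptr = (int __user *)arg;
 *
 *              if (cmd != BFIN_DEMO_GET_VALUE)
 *                      return -ENOTTY;
 *              return put_user(42, uptr);
 *      }
 */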

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */

#define __ptr(x) ((unsigned long *)(x))

#define __put_user_asm(x, p, bhw)                       \
        __asm__ (#bhw"[%1] = %0;\n\t"                   \
                 : /* no outputs */                     \
                 : "d" (x), "a" (__ptr(p)) : "memory")

#define get_user(x, ptr)                                        \
({                                                              \
        int _err = 0;                                           \
        unsigned long _val = 0;                                 \
        const typeof(*(ptr)) __user *_p = (ptr);                \
        const size_t ptr_size = sizeof(*(_p));                  \
        if (likely(access_ok(VERIFY_READ, _p, ptr_size))) {     \
                BUILD_BUG_ON(ptr_size >= 8);                    \
                switch (ptr_size) {                             \
                case 1:                                         \
                        __get_user_asm(_val, _p, B, (Z));       \
                        break;                                  \
                case 2:                                         \
                        __get_user_asm(_val, _p, W, (Z));       \
                        break;                                  \
                case 4:                                         \
                        __get_user_asm(_val, _p,  , );          \
                        break;                                  \
                }                                               \
        } else                                                  \
                _err = -EFAULT;                                 \
        x = (typeof(*(ptr)))_val;                               \
        _err;                                                   \
})

#define __get_user(x, p) get_user(x, p)

#define __get_user_bad() (bad_user_access_length(), (-EFAULT))

#define __get_user_asm(x, ptr, bhw, option)     \
({                                              \
        __asm__ __volatile__ (                  \
                "%0 =" #bhw "[%1]" #option ";"  \
                : "=d" (x)                      \
                : "a" (__ptr(ptr)));            \
})
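
/*
 * Usage sketch: reading a single value from userspace with get_user(),
 * which evaluates to 0 on success and -EFAULT on a bad pointer.  The
 * function name is hypothetical.
 *
 *      static int demo_read_flag(const int __user *uptr, int *flag)
 *      {
 *              return get_user(*flag, uptr);
 *      }
 */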

#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#define copy_to_user_ret(to, from, n, retval) ({ if (copy_to_user(to, from, n))\
                                                 return retval; })

#define copy_from_user_ret(to, from, n, retval) ({ if (copy_from_user(to, from, n))\
                                                   return retval; })

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n))
                memcpy(to, (const void __force *)from, n);
        else
                return n;
        return 0;
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                memcpy((void __force *)to, from, n);
        else
                return n;
        return 0;
}
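
/*
 * Usage sketch: a write() handler copying a user buffer into a kernel
 * buffer.  On this port copy_from_user() returns the full length on
 * failure, so any nonzero return is treated as -EFAULT.  All names below
 * are hypothetical.
 *
 *      static ssize_t demo_write(struct file *file, const char __user *buf,
 *                                size_t count, loff_t *ppos)
 *      {
 *              char kbuf[64];
 *
 *              if (count > sizeof(kbuf))
 *                      count = sizeof(kbuf);
 *              if (copy_from_user(kbuf, buf, count))
 *                      return -EFAULT;
 *              return count;
 *      }
 */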

/*
 * Copy a NUL-terminated string from userspace.
 */

static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
        char *tmp;
        if (!access_ok(VERIFY_READ, src, 1))
                return -EFAULT;
        strncpy(dst, (const char __force *)src, count);
        for (tmp = dst; *tmp && count > 0; tmp++, count--) ;
        return (tmp - dst);
}
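
/*
 * Usage sketch: copying a short, NUL-terminated name from userspace.
 * strncpy_from_user() returns the string length on success and -EFAULT on
 * a bad pointer; the caller still guarantees termination when the source
 * fills the buffer.  The function name is hypothetical.
 *
 *      static int demo_set_name(const char __user *uname)
 *      {
 *              char name[32];
 *              long len;
 *
 *              len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *              if (len < 0)
 *                      return -EFAULT;
 *              name[len] = '\0';
 *              return 0;
 *      }
 */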

/*
 * Get the size of a string in user space.
 *   src: The string to measure
 *     n: The maximum valid length
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than n.
 */
static inline long __must_check strnlen_user(const char __user *src, long n)
{
        if (!access_ok(VERIFY_READ, src, 1))
                return 0;
        return strnlen((const char __force *)src, n) + 1;
}

static inline long __must_check strlen_user(const char __user *src)
{
        if (!access_ok(VERIFY_READ, src, 1))
                return 0;
        return strlen((const char __force *)src) + 1;
}
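
/*
 * Usage sketch: sizing a user string before allocating a kernel copy.  A
 * zero return from strnlen_user() means the pointer failed access_ok().
 * The helper name is hypothetical; kmalloc()/kfree()/ERR_PTR() are the
 * usual kernel facilities.
 *
 *      static char *demo_dup_user_string(const char __user *ustr)
 *      {
 *              long len = strnlen_user(ustr, PAGE_SIZE);
 *              char *buf;
 *
 *              if (!len || len > PAGE_SIZE)
 *                      return ERR_PTR(-EFAULT);
 *              buf = kmalloc(len, GFP_KERNEL);
 *              if (!buf)
 *                      return ERR_PTR(-ENOMEM);
 *              if (strncpy_from_user(buf, ustr, len) < 0) {
 *                      kfree(buf);
 *                      return ERR_PTR(-EFAULT);
 *              }
 *              return buf;
 *      }
 */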

/*
 * Zero Userspace
 */

static inline unsigned long __must_check
__clear_user(void __user *to, unsigned long n)
{
        if (!access_ok(VERIFY_WRITE, to, n))
                return n;
        memset((void __force *)to, 0, n);
        return 0;
}

#define clear_user(to, n) __clear_user(to, n)
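
/*
 * Usage sketch: zeroing the tail of a user buffer after a short read, so
 * userspace never sees stale data.  The function name is hypothetical.
 *
 *      static int demo_pad_tail(char __user *buf, size_t copied, size_t total)
 *      {
 *              if (copied < total && clear_user(buf + copied, total - copied))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */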

/* How to interpret these return values:
 *      CORE:      can be accessed by core load or dma memcpy
 *      CORE_ONLY: can only be accessed by core load
 *      DMA:       can only be accessed by dma memcpy
 *      IDMA:      can only be accessed by interprocessor dma memcpy (BF561)
 *      ITEST:     can be accessed by isram memcpy or dma memcpy
 */
enum {
        BFIN_MEM_ACCESS_CORE = 0,
        BFIN_MEM_ACCESS_CORE_ONLY,
        BFIN_MEM_ACCESS_DMA,
        BFIN_MEM_ACCESS_IDMA,
        BFIN_MEM_ACCESS_ITEST,
};
/**
 *      bfin_mem_access_type() - what kind of memory access is required
 *      @addr:   the address to check
 *      @size:   number of bytes needed
 *      @return: <0 is error, >=0 is BFIN_MEM_ACCESS_xxx enum (see above)
 */
int bfin_mem_access_type(unsigned long addr, unsigned long size);
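
/*
 * Usage sketch: choosing a copy strategy based on the reported access
 * type.  The helper name is hypothetical and the dma_memcpy() fallback is
 * illustrative only.
 *
 *      static int demo_copy(void *dst, const void *src, size_t size)
 *      {
 *              int type = bfin_mem_access_type((unsigned long)src, size);
 *
 *              if (type < 0)
 *                      return type;
 *              if (type == BFIN_MEM_ACCESS_CORE)
 *                      memcpy(dst, src, size);
 *              else
 *                      dma_memcpy(dst, src, size);
 *              return 0;
 *      }
 */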

#endif                          /* __BLACKFIN_UACCESS_H */