linux/include/linux/uaccess.h
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than could have been fetched; the
 * only hard requirement is that not storing anything at all (i.e. returning
 * size) should happen only when nothing could be copied.  In other words,
 * you don't have to squeeze as much as possible - it is allowed, but not
 * necessary.
 *
 * For raw_copy_from_user() the destination (to) always points to kernel
 * memory and no faults on store should happen.  Interpretation of from is
 * affected by set_fs().  For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to each architecture whether it wants to
 * bother with that.  They should not be used directly; they are used to
 * implement the 6 functions (copy_{to,from}_user(),
 * __copy_{to,from}_user{,_inatomic}()) that are used instead.  Out of those,
 * the __... ones are inlined.  Plain copy_{to,from}_user() might or might
 * not be inlined.  If you want them inlined, have asm/uaccess.h define
 * INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
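
/*
 * A worked example of the rules above (illustrative only; struct foo, karg
 * and uarg are made-up names): suppose copy_from_user() asks for 128 bytes
 * and only the first 96 can be fetched.  raw_copy_from_user() may then
 * legally return anything from 32 (everything fetchable was stored) up to
 * 127 (only a single byte was stored), but never 128, since something could
 * be copied.  copy_from_user() zero-pads the reported number of uncopied
 * bytes at the tail of the destination and passes the count on, so a
 * typical caller only needs:
 *
 *        struct foo karg;
 *
 *        if (copy_from_user(&karg, uarg, sizeof(karg)))
 *                return -EFAULT;
 */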

/*
 * Variants without the access_ok() check and without zero-padding on a
 * short copy; see the rules above.  Callers must check the return value.
 */
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
        kasan_check_write(to, n);
        check_object_size(to, n, false);
        return raw_copy_from_user(to, from, n);
}

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_fault();
        kasan_check_write(to, n);
        check_object_size(to, n, false);
        return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space pages are pinned,
 * so that the copy does not fault and sleep.
 */
static __always_inline unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
        kasan_check_read(from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}

static __always_inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        kasan_check_read(from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}
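
/*
 * Illustrative sketch of the calling convention for the __ variants above
 * (ubuf, kbuf and len are made-up names): the caller performs the
 * access_ok() check itself and must act on a short copy, since nothing is
 * zero-padded at this level.
 *
 *        if (!access_ok(VERIFY_WRITE, ubuf, len))
 *                return -EFAULT;
 *        if (__copy_to_user(ubuf, kbuf, len))
 *                return -EFAULT;
 */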

#ifdef INLINE_COPY_FROM_USER
static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;
        might_fault();
        if (likely(access_ok(VERIFY_READ, from, n))) {
                kasan_check_write(to, n);
                res = raw_copy_from_user(to, from, n);
        }
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
}
#else
extern unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        if (access_ok(VERIFY_WRITE, to, n)) {
                kasan_check_read(from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}
#else
extern unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (likely(check_copy_size(to, n, false)))
                n = _copy_from_user(to, from, n);
        return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (likely(check_copy_size(from, n, true)))
                n = _copy_to_user(to, from, n);
        return n;
}
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
                n = raw_copy_in_user(to, from, n);
        return n;
}
#endif

static __always_inline void pagefault_disabled_inc(void)
{
        current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
        current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
        pagefault_disabled_inc();
        /*
         * make sure to have issued the store before a pagefault
         * can hit.
         */
        barrier();
}

static inline void pagefault_enable(void)
{
        /*
         * make sure to issue those last loads/stores before enabling
         * the pagefault handler again.
         */
        barrier();
        pagefault_disabled_dec();
}
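
/*
 * Typical use of the pair above (illustrative sketch; dst, usrc and len are
 * made-up names, and access_ok() is assumed to have been checked already):
 * disable page faults around an atomic user copy and fall back to a
 * sleeping copy if the page was not resident.
 *
 *        pagefault_disable();
 *        left = __copy_from_user_inatomic(dst, usrc, len);
 *        pagefault_enable();
 *        if (left)
 *                left = copy_from_user(dst, usrc, len);
 */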

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
#define pagefault_disabled() (current->pagefault_disabled != 0)

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This macro should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler: with
 * !CONFIG_PREEMPT_COUNT it is a no-op, so the handler would not actually be
 * disabled, and in_atomic() reports different values depending on
 * CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        return __copy_from_user_inatomic(to, from, n);
}

#endif          /* ARCH_HAS_NOCACHE_UACCESS */

/*
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
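
/*
 * Example (illustrative; maybe_bad_ptr is a made-up name): read a value
 * through a pointer that may not be mapped, without risking an oops.
 *
 *        unsigned long val;
 *
 *        if (probe_kernel_read(&val, maybe_bad_ptr, sizeof(val)))
 *                return -EFAULT;
 */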

/*
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
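
/*
 * Example (illustrative; patch_addr and insn are made-up names): store a
 * value at an address that may be unmapped, handling the fault instead of
 * oopsing.
 *
 *        if (probe_kernel_write(patch_addr, &insn, sizeof(insn)))
 *                return -EFAULT;
 */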

/* Like probe_kernel_read(), but copies a NUL-terminated string of at most @count bytes. */
extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval)              \
        probe_kernel_read(&retval, addr, sizeof(retval))
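
/*
 * Example (illustrative; dying_task is a made-up pointer): read a field
 * through a pointer that may no longer be valid.
 *
 *        unsigned long flags;
 *
 *        if (probe_kernel_address(&dying_task->flags, flags))
 *                return -EFAULT;
 */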

#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
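
/*
 * Sketch of the intended unsafe_get_user()/unsafe_put_user() usage
 * (illustrative only; uptr and val are made-up names): batch accesses
 * between user_access_begin() and user_access_end() and jump to a label on
 * fault.  access_ok() must still have been checked beforehand.
 *
 *        if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *                return -EFAULT;
 *        user_access_begin();
 *        unsafe_get_user(val, uptr, efault);
 *        user_access_end();
 *        return 0;
 * efault:
 *        user_access_end();
 *        return -EFAULT;
 */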

#endif          /* __LINUX_UACCESS_H__ */