linux/include/linux/uaccess.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

#ifdef CONFIG_SET_FS
/*
 * Force the uaccess routines to be wired up for actual userspace access,
 * overriding any possible set_fs(KERNEL_DS) still lingering around.  Undone
 * using force_uaccess_end below.
 */
static inline mm_segment_t force_uaccess_begin(void)
{
        mm_segment_t fs = get_fs();

        set_fs(USER_DS);
        return fs;
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
        set_fs(oldfs);
}
#else /* CONFIG_SET_FS */
typedef struct {
        /* empty dummy */
} mm_segment_t;

#ifndef TASK_SIZE_MAX
#define TASK_SIZE_MAX                   TASK_SIZE
#endif

#define uaccess_kernel()                (false)
#define user_addr_max()                 (TASK_SIZE_MAX)

static inline mm_segment_t force_uaccess_begin(void)
{
        return (mm_segment_t) { };
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
}
#endif /* CONFIG_SET_FS */

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), "to" always points to kernel memory and no faults
 * on store should happen.  Interpretation of "from" is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to the architecture whether it wants to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
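
/*
 * A minimal, hedged sketch of the contract above, assuming a hypothetical
 * architecture that falls back to a byte-wise __get_user() loop; real ports
 * use optimized assembly with exception fixups instead.
 *
 *      static inline unsigned long
 *      raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 *      {
 *              char *dst = to;
 *              const char __user *src = from;
 *
 *              while (n) {
 *                      char c;
 *
 *                      if (__get_user(c, src))  // fault: report what is left
 *                              break;
 *                      *dst++ = c;
 *                      src++;
 *                      n--;
 *              }
 *              return n;                        // 0 means everything copied
 *      }
 */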

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
        instrument_copy_from_user(to, from, n);
        check_object_size(to, n, false);
        return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_fault();
        if (should_fail_usercopy())
                return n;
        instrument_copy_from_user(to, from, n);
        check_object_size(to, n, false);
        return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  The caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space pages are pinned,
 * so that the copy cannot page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
        if (should_fail_usercopy())
                return n;
        instrument_copy_to_user(to, from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        if (should_fail_usercopy())
                return n;
        instrument_copy_to_user(to, from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;
        might_fault();
        if (!should_fail_usercopy() && likely(access_ok(from, n))) {
                instrument_copy_from_user(to, from, n);
                res = raw_copy_from_user(to, from, n);
        }
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        if (should_fail_usercopy())
                return n;
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (likely(check_copy_size(to, n, false)))
                n = _copy_from_user(to, from, n);
        return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (likely(check_copy_size(from, n, true)))
                n = _copy_to_user(to, from, n);
        return n;
}
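
/*
 * Hedged usage sketch (not part of this header): a typical driver read()
 * handler built on copy_to_user().  The return value is the number of bytes
 * that could not be copied, so any non-zero result becomes -EFAULT here.
 * "dev_buf" and "dev_len" are hypothetical.
 *
 *      static ssize_t foo_read(struct file *file, char __user *buf,
 *                              size_t count, loff_t *ppos)
 *      {
 *              size_t len;
 *
 *              if (*ppos >= dev_len)
 *                      return 0;
 *              len = min(count, (size_t)(dev_len - *ppos));
 *              if (copy_to_user(buf, dev_buf + *ppos, len))
 *                      return -EFAULT; // some bytes were not written out
 *              *ppos += len;
 *              return len;
 *      }
 */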
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        if (access_ok(to, n) && access_ok(from, n))
                n = raw_copy_in_user(to, from, n);
        return n;
}
#endif

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
        memcpy(dst, src, cnt);
        return 0;
}
#endif

static __always_inline void pagefault_disabled_inc(void)
{
        current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
        current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
        pagefault_disabled_inc();
        /*
         * make sure to have issued the store before a pagefault
         * can hit.
         */
        barrier();
}

static inline void pagefault_enable(void)
{
        /*
         * make sure to issue those last loads/stores before enabling
         * the pagefault handler again.
         */
        barrier();
        pagefault_disabled_dec();
}
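
/*
 * Hedged usage sketch (not part of this header): bracketing an atomic user
 * copy with pagefault_disable()/pagefault_enable().  A fault then goes
 * straight to the fixup table instead of sleeping, so a short copy has to
 * be handled by the caller.  "kbuf", "uaddr" and "len" are hypothetical.
 *
 *      pagefault_disable();
 *      left = __copy_from_user_inatomic(kbuf, uaddr, len);
 *      pagefault_enable();
 *      if (left) {
 *              // could not copy everything without faulting; retry with
 *              // copy_from_user() from a context that may sleep, or bail out
 *      }
 */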

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
        return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
 * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
                                  unsigned long n)
{
        return __copy_from_user_inatomic(to, from, n);
}

#endif          /* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
                      size_t usize)
{
        size_t size = min(ksize, usize);
        size_t rest = max(ksize, usize) - size;

        /* Deal with trailing bytes. */
        if (usize < ksize) {
                memset(dst + size, 0, rest);
        } else if (usize > ksize) {
                int ret = check_zeroed_user(src + size, rest);
                if (ret <= 0)
                        return ret ?: -E2BIG;
        }
        /* Copy the interoperable parts of the struct. */
        if (copy_from_user(dst, src, size))
                return -EFAULT;
        return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
                size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
                long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
                long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({                         \
        const typeof(val) *__gk_ptr = (ptr);                    \
        copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
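
/*
 * Hedged usage sketch (not part of this header): probing a kernel address
 * that may not be mapped, e.g. from a debugging or dump path.  "ptr" is a
 * hypothetical, possibly-invalid pointer.
 *
 *      unsigned long word;
 *
 *      if (get_kernel_nofault(word, (unsigned long *)ptr))
 *              pr_info("%px is not readable\n", ptr);
 *      else
 *              pr_info("value at %px: %#lx\n", ptr, word);
 */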

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
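
/*
 * Hedged usage sketch (not part of this header): the begin/unsafe/end
 * pattern, which performs the access_ok() check (and any arch-specific gate,
 * e.g. SMAP/PAN toggling) once for a run of accesses.  "uptr" and its fields
 * are hypothetical; unsafe_get_user() branches to the label on a fault.
 *
 *      if (!user_read_access_begin(uptr, sizeof(*uptr)))
 *              return -EFAULT;
 *      unsafe_get_user(a, &uptr->a, Efault);
 *      unsafe_get_user(b, &uptr->b, Efault);
 *      user_read_access_end();
 *      return 0;
 * Efault:
 *      user_read_access_end();
 *      return -EFAULT;
 */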

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
                   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
                               bool to_user, unsigned long offset,
                               unsigned long len);
#endif

#endif          /* __LINUX_UACCESS_H__ */