linux/arch/x86/include/asm/uaccess_32.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

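/*
 * Out-of-line copy routines, implemented in arch/x86/lib/usercopy_32.c.
 * Both return the number of bytes that could NOT be copied (0 on success).
 */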
unsigned long __must_check __copy_user_ll
		(void *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);

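/*
 * Copy @n bytes from kernel space to user space.  Returns the number of
 * bytes left uncopied, i.e. 0 on success.  The access_ok() check is the
 * caller's responsibility (the generic copy_to_user() wrapper does it).
 */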
static __always_inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user_ll((__force void *)to, from, n);
}

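/*
 * Copy @n bytes from user space to kernel space.  When @n is known at
 * compile time and is 1, 2 or 4 bytes, a single move is inlined via
 * __get_user_asm_nozero(), bracketed by __uaccess_begin_nospec() and
 * __uaccess_end(), which handle SMAP (STAC/CLAC) and the Spectre v1
 * speculation barrier.  All other sizes fall back to the out-of-line
 * __copy_user_ll().  "nozero" means the destination is not zero-padded
 * on a fault; the generic _copy_from_user() wrapper takes care of that.
 */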
static __always_inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			ret = 0;
			__uaccess_begin_nospec();
			__get_user_asm_nozero(*(u8 *)to, from, ret,
					      "b", "b", "=q", 1);
			__uaccess_end();
			return ret;
		case 2:
			ret = 0;
			__uaccess_begin_nospec();
			__get_user_asm_nozero(*(u16 *)to, from, ret,
					      "w", "w", "=r", 2);
			__uaccess_end();
			return ret;
		case 4:
			ret = 0;
			__uaccess_begin_nospec();
			__get_user_asm_nozero(*(u32 *)to, from, ret,
					      "l", "k", "=r", 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_user_ll(to, (__force const void *)from, n);
}
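
/*
 * A simplified sketch (not part of this header) of how the generic
 * _copy_from_user() wrapper in include/linux/uaccess.h drives the
 * raw_copy_from_user() hook above; details such as the access_ok()
 * signature vary between kernel versions:
 *
 *	static inline __must_check unsigned long
 *	_copy_from_user(void *to, const void __user *from, unsigned long n)
 *	{
 *		unsigned long res = n;
 *
 *		might_fault();
 *		if (likely(access_ok(from, n)))
 *			res = raw_copy_from_user(to, from, n);
 *		if (unlikely(res))
 *			memset(to + (n - res), 0, res);
 *		return res;
 *	}
 */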
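
/*
 * Copy from user space using non-temporal stores where available, to
 * avoid polluting the CPU caches.  As with the routines above, the
 * caller must have performed the access_ok() check, and the tail of
 * the destination is not zeroed on a short copy ("nozero").
 */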
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}

#endif /* _ASM_X86_UACCESS_32_H */