/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

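/*
 * copy_user_generic() picks between the three implementations above when
 * alternatives are applied at boot: the call target is patched in place
 * according to the CPU features present, so no runtime branch is needed.
 * It returns the number of bytes that could not be copied.
 */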
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

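/*
 * raw_copy_from_user() open-codes small constant-size copies as single
 * mov instructions and falls back to copy_user_generic() for everything
 * else.  Illustrative use (not part of this header): callers normally go
 * through copy_from_user(), which performs the access_ok() check first:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */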
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
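	/*
	 * Sizes 10 and 16 are copied as two moves; a fault in the first
	 * leg skips the second, and ret reports what remains uncopied.
	 */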
	case 10:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

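/*
 * raw_copy_to_user() mirrors raw_copy_from_user() in the other direction.
 * Illustrative use (not part of this header): callers normally go through
 * copy_to_user(), which performs the access_ok() check first:
 *
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */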
static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
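			/*
			 * Compiler barrier: the user-space store above is
			 * invisible to the compiler, so force it to stay
			 * ordered before the trailing store (likewise in
			 * the 16-byte case below).
			 */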
			asm("":::"memory");
			__put_user_asm(((u16 *)src)[4], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(((u64 *)src)[1], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

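/*
 * raw_copy_in_user(): both source and destination are user pointers
 * (user-to-user copies, e.g. in the compat layer); there is no
 * constant-size fast path here.
 */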
static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}

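/*
 * Cache-bypassing variants: __copy_user_nocache() uses non-temporal
 * stores, and __copy_user_flushcache() additionally flushes any cache
 * lines it did write through, for use with persistent memory.
 */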
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
				   size_t len);

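/*
 * The wrappers below check the destination with KASAN by hand, since
 * the copies are done in assembly and are invisible to the usual
 * compiler instrumentation.
 */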
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
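
/*
 * Fixup helper called when a copy faults part way through: it retries
 * the remainder byte by byte and returns the number of bytes that
 * could not be copied.
 */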
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */