/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
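
/*
 * Illustrative sketch (not part of this header): alternative_call_2()
 * patches the call target once at boot, so the dispatch above behaves
 * roughly like the following C, with the branch resolved at patch time
 * rather than on every call (ERMS takes precedence over REP_GOOD):
 *
 *	if (boot_cpu_has(X86_FEATURE_ERMS))
 *		ret = copy_user_enhanced_fast_string(to, from, len);
 *	else if (boot_cpu_has(X86_FEATURE_REP_GOOD))
 *		ret = copy_user_generic_string(to, from, len);
 *	else
 *		ret = copy_user_generic_unrolled(to, from, len);
 */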

static __always_inline __must_check unsigned long
copy_to_user_mcsafe(void *to, const void *from, unsigned len)
{
	unsigned long ret;

	__uaccess_begin();
	/*
	 * Note, __memcpy_mcsafe() is explicitly used since it can
	 * handle exceptions / faults.  memcpy_mcsafe() may fall back to
	 * memcpy() which lacks this handling.
	 */
	ret = __memcpy_mcsafe(to, from, len);
	__uaccess_end();
	return ret;
}
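
/*
 * Usage sketch (illustrative; ubuf and kaddr are hypothetical): callers
 * that may read from memory with poison, e.g. pmem-backed pages, prefer
 * the mcsafe variant so a machine check on the source read becomes a
 * short copy rather than a crash:
 *
 *	unsigned long rem = copy_to_user_mcsafe(ubuf, kaddr, len);
 *	if (rem)
 *		return -EFAULT;		(rem bytes were not copied)
 */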

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	/*
	 * Constant sizes are open-coded below; the final argument to
	 * __get_user_asm_nozero() is the byte count reported as uncopied
	 * if that access faults.
	 */
	switch (size) {
	case 1:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
				      ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
				      ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
				      ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
				      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
				      ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
				      ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
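
/*
 * Illustrative sketch (struct pair and uptr are hypothetical): because
 * sizeof() is a compile-time constant, a copy like the one below can
 * resolve to the fixed 16-byte case above instead of the generic copy:
 *
 *	struct pair { u64 a; u64 b; } p;
 *	if (copy_from_user(&p, uptr, sizeof(p)))
 *		return -EFAULT;
 */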

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");	/* compiler barrier */
			__put_user_asm(((u16 *)src)[4], (u16 __user *)dst + 4,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");	/* compiler barrier */
			__put_user_asm(((u64 *)src)[1], (u64 __user *)dst + 1,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
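
/*
 * Mirror-image sketch of the write path (uptr is hypothetical): a
 * constant 8-byte copy_to_user() lands in the single __put_user_asm()
 * case above:
 *
 *	u64 val = 42;
 *	if (copy_to_user(uptr, &val, sizeof(val)))
 *		return -EFAULT;
 */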

static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}
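
/*
 * Note (illustrative; udst and usrc are hypothetical): raw_copy_in_user()
 * copies user-to-user, as the compat layer does when repacking 32-bit
 * structures in user space; normal callers go through copy_in_user(),
 * which performs the access_ok() checks first:
 *
 *	if (copy_in_user(udst, usrc, len))
 *		return -EFAULT;
 */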

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
				   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
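
/*
 * Illustrative note (dax_addr and ubuf are hypothetical): the nocache
 * variant uses non-temporal stores to avoid polluting the CPU cache,
 * while the flushcache variant also ensures the destination cache lines
 * are written back, which persistent-memory callers rely on:
 *
 *	if (__copy_from_user_flushcache(dax_addr, ubuf, len))
 *		return -EFAULT;
 */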

unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */