/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}
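/*
 * Worked example of the constant-size branch above: with addr == ~0UL
 * and size == 4, "addr + size" would wrap around to 3 and look valid,
 * whereas "addr > limit - size" stays overflow-free because a sizeof()
 * value is known to be far smaller than the limit.  The non-constant
 * branch has to detect the wraparound explicitly instead, via
 * "addr < size" after the addition.
 */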

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, TASK_SIZE_MAX));		\
})
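/*
 * Illustrative use (a sketch, not part of this header): validating a
 * hypothetical user buffer 'ubuf' of 'len' bytes once, up front,
 * before a series of unchecked accesses:
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 */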

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
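/*
 * For illustration: __inttype(*(u8 __user *)p) selects "unsigned char"
 * and __inttype(*(int __user *)p) selects "unsigned int", while an
 * 8-byte value selects "unsigned long" on 64-bit (and falls through to
 * "unsigned long long" on 32-bit, where long is only 4 bytes).  Always
 * using the unsigned type of matching size keeps sign extension out of
 * the transfer itself.
 */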

/*
 * This is used for both get_user() and __get_user() to expand to
 * the proper special function call that has odd calling conventions
 * due to returning both a value and an error, and that depends on
 * the size of the pointer passed in.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define do_get_user_call(fn,x,ptr)					\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	asm volatile("call __" #fn "_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
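/*
 * Example expansion, for illustration: get_user(x, (u32 __user *)p)
 * emits "call __get_user_4", with the pointer passed in %eax/%rax
 * (tied to the "=a" error output) and the value coming back in the
 * _ASM_DX register variable, so the call site needs no stack spill.
 */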

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
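/*
 * Illustrative use (assuming a hypothetical ioctl argument 'arg'):
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)arg))
 *		return -EFAULT;
 */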

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)
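/*
 * Sketch of the intended pairing with access_ok(), using a
 * hypothetical user pointer 'uptr':
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 */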


#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);

/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to avoid any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
 */
#define do_put_user_call(fn,x,ptr)					\
({									\
	int __ret_pu;							\
	void __user *__ptr_pu;						\
	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX);		\
	__chk_user_ptr(ptr);						\
	__ptr_pu = (ptr);						\
	__val_pu = (x);							\
	asm volatile("call __" #fn "_%P[size]"				\
		     : "=c" (__ret_pu),					\
			ASM_CALL_CONSTRAINT				\
		     : "0" (__ptr_pu),					\
		       "r" (__val_pu),					\
		       [size] "i" (sizeof(*(ptr)))			\
		     :"ebx");						\
	__builtin_expect(__ret_pu, 0);					\
})
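/*
 * Example expansion, for illustration: put_user(x, (u16 __user *)p)
 * emits "call __put_user_2", with the value pinned to %eax by the
 * register variable above and the pointer in %ecx, which the callee
 * reuses for its error return.
 */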

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
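/*
 * Illustrative use (assuming a hypothetical destination 'uresult'):
 *
 *	if (put_user(status, (int __user *)uresult))
 *		return -EFAULT;
 */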

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, label) do {				\
	unsigned int __gu_low, __gu_high;				\
	const unsigned int __user *__gu_ptr;				\
	__gu_ptr = (const void __user *)(ptr);				\
	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;	\
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label)				\
	__get_user_asm(x, ptr, "q", "=r", label)
#endif

#define __get_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	unsigned char x_u8__;						\
	case 1:								\
		__get_user_asm(x_u8__, ptr, "b", "=q", label);		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, "w", "=r", label);		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, "l", "=r", label);		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, label);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)				\
		     : [output] ltype(x)				\
		     : [umem] "m" (__m(addr))				\
		     : : label)

#else

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %[lowbits],%%eax\n"		\
		     "2:	movl %[highbits],%%edx\n"		\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %[efault],%[errout]\n"		\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       [efault] "i" (-EFAULT), "0" (retval));		\
})

#else
#define __get_user_asm_u64(x, ptr, retval)				\
	 __get_user_asm(x, ptr, retval, "q", "=r")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x_u8__, ptr, retval, "b", "=q");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, ltype)			\
	asm volatile("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %[efault],%[errout]\n"		\
		     "	xorl %k[output],%k[output]\n"			\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : [errout] "=r" (err),				\
		       [output] ltype(x)				\
		     : [umem] "m" (__m(addr)),				\
		       [efault] "i" (-EFAULT), "0" (err))

#endif

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		"1:	mov"itype" %0,%1\n"				\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void *to, const void *from, unsigned len);
#endif

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)
#else
#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	if (unlikely(__gu_err)) goto err_label;				\
} while (0)
#endif
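/*
 * Sketch of the intended pattern (with a hypothetical u32 __user
 * pointer 'uptr'): the range check and the STAC/CLAC bracketing are
 * hoisted out so the access itself stays branch-free.
 *
 *	u32 val;
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uptr, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */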

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)			\
	while (len >= sizeof(type)) {					\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label); \
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
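/*
 * Note on the descending type sizes above: a 15-byte copy, for
 * example, is emitted as one u64, one u32, one u16 and one u8 store,
 * each individually covered by an exception table entry through
 * unsafe_put_user().
 */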

#define HAVE_GET_KERNEL_NOFAULT

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), err_label)
#else
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
#endif

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)
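/*
 * These back the generic get_kernel_nofault() and
 * copy_from_kernel_nofault() helpers.  Illustrative use from generic
 * code, with a hypothetical kernel address 'addr':
 *
 *	unsigned long val;
 *
 *	if (get_kernel_nofault(val, (unsigned long *)addr))
 *		return -EFAULT;
 */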

#endif /* _ASM_X86_UACCESS_H */