/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>
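
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
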
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr)					\
	((unsigned long __force)(addr) < user_addr_max())

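/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */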
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

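/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */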
#define access_ok(type, addr, size)					\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})

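/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
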
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

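/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */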
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

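/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */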
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

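/*
 * On 32-bit, a 64-bit put_user() has to be split into two 32-bit stores
 * (%eax and %edx), each with its own exception-table entry.
 */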
#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

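/*
 * Out-of-line put_user() helpers: the value to store is passed in %eax
 * (%eax:%edx for 64-bit values on 32-bit), the user pointer in %ecx.
 */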
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

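/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */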
#define put_user(x, ptr)						\
({									\
	int __ret_pu;							\
	__typeof__(*(ptr)) __pu_val;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__pu_val = x;							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_x(1, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 2:								\
		__put_user_x(2, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 4:								\
		__put_user_x(4, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 8:								\
		__put_user_x8(__pu_val, ptr, __ret_pu);			\
		break;							\
	default:							\
		/* "call __put_user_X" has no target: link-time error */ \
		__put_user_x(X, __pu_val, ptr, __ret_pu);		\
		break;							\
	}								\
	__builtin_expect(__ret_pu, 0);					\
})

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

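/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */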
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)		(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

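/*
 * Like __get_user_asm(), but the destination is left untouched on a
 * fault instead of being zeroed; callers handle the partial copy.
 */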
#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

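/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */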
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	xor"itype" %"rtype"0,%"rtype"0\n"	\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__uaccess_begin();					\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
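
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */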
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

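/*
 * uaccess_try and catch
 */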
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();					\

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

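/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */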
#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

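/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */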
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

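/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */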
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

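/*
 * A LOCK-prefixed cmpxchg on a user-space address, with fixup: returns 0
 * on success and -EFAULT if the access faults; the value found at @ptr is
 * stored through @uval so the caller can detect a lost race.
 */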
#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})

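/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */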
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

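/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */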
#define __copy_from_user_nmi __copy_from_user_inatomic

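/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */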
#define user_access_begin()	__uaccess_begin()
#define user_access_end()	__uaccess_end()

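/*
 * A minimal usage sketch (put_word() is a hypothetical caller, not part
 * of this header): check the pointer once, open a user-access section,
 * and close it on both the success and the fault path - the fault label
 * is reached with the section still open.
 *
 *	int put_word(u32 __user *ptr, u32 val)
 *	{
 *		if (!access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)))
 *			return -EFAULT;
 *		user_access_begin();
 *		unsafe_put_user(val, ptr, Efault);
 *		user_access_end();
 *		return 0;
 *	Efault:
 *		user_access_end();
 *		return -EFAULT;
 *	}
 */
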
#define unsafe_put_user(x, ptr, err_label)					\
do {										\
	int __pu_err;								\
	__typeof__(*(ptr)) __pu_val = (x);					\
	__put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	if (unlikely(__pu_err)) goto err_label;					\
} while (0)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)

#endif /* _ASM_X86_UACCESS_H */