#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })

#define KERNEL_DS MAKE_MM_SEG(-1UL)
#define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr) \
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit) \
({ \
	__chk_user_ptr(addr); \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
	likely(!__range_not_ok(addr, size, user_addr_max()))
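
/*
 * Illustrative caller-side sketch (not part of this header; "ubuf" and
 * "len" are placeholder names): validate a user range once before using
 * the unchecked accessors defined further below.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */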

/*
 * The exception table consists of triples of addresses relative to the
 * exception table entry itself. The first address is of an instruction
 * that is allowed to fault, the second is the target at which the program
 * should continue. The third is a handler function to deal with the fault
 * caused by the instruction in the first field.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	int insn, fixup, handler;
};

#define ARCH_HAS_RELATIVE_EXTABLE

#define swap_ex_entry_fixup(a, b, tmp, delta) \
	do { \
		(a)->fixup = (b)->fixup + (delta); \
		(b)->fixup = (tmp).fixup - (delta); \
		(a)->handler = (b)->handler + (delta); \
		(b)->handler = (tmp).handler - (delta); \
	} while (0)

extern int fixup_exception(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern int early_fixup_exception(unsigned long *ip);

/*
 * These are the main single-word sanitizing accessors for user memory.
 * They automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end() clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 */
#define get_user(x, ptr) \
({ \
	int __ret_gu; \
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
	register void *__sp asm(_ASM_SP); \
	__chk_user_ptr(ptr); \
	might_fault(); \
	asm volatile("call __get_user_%P4" \
		     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) \
		     : "0" (ptr), "i" (sizeof(*(ptr)))); \
	(x) = (__force __typeof__(*(ptr))) __val_gu; \
	__builtin_expect(__ret_gu, 0); \
})
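
/*
 * Hypothetical usage sketch ("uaddr" is a placeholder user pointer):
 * get_user() picks the right __get_user_N() call from sizeof(*ptr),
 * returns 0 on success or -EFAULT on fault, and zeroes the destination
 * on error.
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)uaddr))
 *		return -EFAULT;
 */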

#define __put_user_x(size, x, ptr, __ret_pu) \
	asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret) \
	asm volatile("\n" \
		     "1: movl %%eax,0(%2)\n" \
		     "2: movl %%edx,4(%2)\n" \
		     "3:" \
		     ".section .fixup,\"ax\"\n" \
		     "4: movl %3,%0\n" \
		     " jmp 3b\n" \
		     ".previous\n" \
		     _ASM_EXTABLE(1b, 4b) \
		     _ASM_EXTABLE(2b, 4b) \
		     : "=r" (err) \
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr) \
	asm volatile("\n" \
		     "1: movl %%eax,0(%1)\n" \
		     "2: movl %%edx,4(%1)\n" \
		     "3:" \
		     _ASM_EXTABLE_EX(1b, 2b) \
		     _ASM_EXTABLE_EX(2b, 3b) \
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu) \
	asm volatile("call __put_user_8" : "=a" (__ret_pu) \
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr) \
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %ebx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) \
({ \
	int __ret_pu; \
	__typeof__(*(ptr)) __pu_val; \
	__chk_user_ptr(ptr); \
	might_fault(); \
	__pu_val = x; \
	switch (sizeof(*(ptr))) { \
	case 1: \
		__put_user_x(1, __pu_val, ptr, __ret_pu); \
		break; \
	case 2: \
		__put_user_x(2, __pu_val, ptr, __ret_pu); \
		break; \
	case 4: \
		__put_user_x(4, __pu_val, ptr, __ret_pu); \
		break; \
	case 8: \
		__put_user_x8(__pu_val, ptr, __ret_pu); \
		break; \
	default: \
		__put_user_x(X, __pu_val, ptr, __ret_pu); \
		break; \
	} \
	__builtin_expect(__ret_pu, 0); \
})
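
/*
 * Hypothetical usage sketch ("uaddr" is a placeholder user pointer):
 * put_user() returns 0 on success or -EFAULT if the user page could not
 * be written.
 *
 *	if (put_user(42, (int __user *)uaddr))
 *		return -EFAULT;
 */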

#define __put_user_size(x, ptr, size, retval, errret) \
do { \
	retval = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: \
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
		break; \
	case 2: \
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
		break; \
	case 4: \
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
		break; \
	case 8: \
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
				   errret); \
		break; \
	default: \
		__put_user_bad(); \
	} \
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size) \
do { \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: \
		__put_user_asm_ex(x, ptr, "b", "b", "iq"); \
		break; \
	case 2: \
		__put_user_asm_ex(x, ptr, "w", "w", "ir"); \
		break; \
	case 4: \
		__put_user_asm_ex(x, ptr, "l", "k", "ir"); \
		break; \
	case 8: \
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
		break; \
	default: \
		__put_user_bad(); \
	} \
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret) \
do { \
	retval = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: \
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
		break; \
	case 2: \
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
		break; \
	case 4: \
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
		break; \
	case 8: \
		__get_user_asm_u64(x, ptr, retval, errret); \
		break; \
	default: \
		(x) = __get_user_bad(); \
	} \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
	asm volatile("\n" \
		     "1: mov"itype" %2,%"rtype"1\n" \
		     "2:\n" \
		     ".section .fixup,\"ax\"\n" \
		     "3: mov %3,%0\n" \
		     " xor"itype" %"rtype"1,%"rtype"1\n" \
		     " jmp 2b\n" \
		     ".previous\n" \
		     _ASM_EXTABLE(1b, 3b) \
		     : "=r" (err), ltype(x) \
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size) \
do { \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: \
		__get_user_asm_ex(x, ptr, "b", "b", "=q"); \
		break; \
	case 2: \
		__get_user_asm_ex(x, ptr, "w", "w", "=r"); \
		break; \
	case 4: \
		__get_user_asm_ex(x, ptr, "l", "k", "=r"); \
		break; \
	case 8: \
		__get_user_asm_ex_u64(x, ptr); \
		break; \
	default: \
		(x) = __get_user_bad(); \
	} \
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
	asm volatile("1: mov"itype" %1,%"rtype"0\n" \
		     "2:\n" \
		     _ASM_EXTABLE_EX(1b, 2b) \
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size) \
({ \
	int __pu_err; \
	__uaccess_begin(); \
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
	__uaccess_end(); \
	__builtin_expect(__pu_err, 0); \
})

#define __get_user_nocheck(x, ptr, size) \
({ \
	int __gu_err; \
	unsigned long __gu_val; \
	__uaccess_begin(); \
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
	__uaccess_end(); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	__builtin_expect(__gu_err, 0); \
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: the user pointer is
 * passed as an input ("m") operand via __m(), so gcc cannot "optimize"
 * the access or anything around it away.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
	asm volatile("\n" \
		     "1: mov"itype" %"rtype"1,%2\n" \
		     "2:\n" \
		     ".section .fixup,\"ax\"\n" \
		     "3: mov %3,%0\n" \
		     " jmp 2b\n" \
		     ".previous\n" \
		     _ASM_EXTABLE(1b, 3b) \
		     : "=r"(err) \
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
	asm volatile("1: mov"itype" %"rtype"0,%1\n" \
		     "2:\n" \
		     _ASM_EXTABLE_EX(1b, 2b) \
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try do { \
	current_thread_info()->uaccess_err = 0; \
	__uaccess_begin(); \
	barrier();

#define uaccess_catch(err) \
	__uaccess_end(); \
	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
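
/*
 * Hypothetical sketch of the check-once pattern described above: one
 * access_ok() call covering an assumed user structure "u" with fields
 * "first" and "second" (placeholder names), followed by the unchecked
 * accessors.
 *
 *	int a, b;
 *
 *	if (!access_ok(VERIFY_READ, u, sizeof(*u)))
 *		return -EFAULT;
 *	if (__get_user(a, &u->first) || __get_user(b, &u->second))
 *		return -EFAULT;
 */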

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try uaccess_try
#define get_user_catch(err) uaccess_catch(err)

#define get_user_ex(x, ptr) do { \
	unsigned long __gue_val; \
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
	(x) = (__force __typeof__(*(ptr)))__gue_val; \
} while (0)

#define put_user_try uaccess_try
#define put_user_catch(err) uaccess_catch(err)

#define put_user_ex(x, ptr) \
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
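
/*
 * Hypothetical sketch of the try/catch accessors (the pattern used by the
 * signal frame code): access_ok() first, then a batch of get_user_ex()
 * calls whose faults are collected into "err".  "frame", "sp" and "ip"
 * are placeholder names.
 *
 *	int err = 0;
 *	unsigned long sp, ip;
 *
 *	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 *		return -EFAULT;
 *	get_user_try {
 *		get_user_ex(sp, &frame->sp);
 *		get_user_ex(ip, &frame->ip);
 *	} get_user_catch(err);
 *	if (err)
 *		return -EFAULT;
 */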

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
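
/*
 * Hypothetical usage sketch ("ustr", "ubuf" and "len" are placeholder
 * names): strncpy_from_user() returns the length of the copied string
 * (or the buffer size if the source was longer) and -EFAULT on fault;
 * clear_user() returns the number of bytes that could not be cleared,
 * so 0 means success.
 *
 *	char name[32];
 *	long n = strncpy_from_user(name, ustr, sizeof(name));
 *
 *	if (n < 0)
 *		return n;
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */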

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
({ \
	int __ret = 0; \
	__typeof__(ptr) __uval = (uval); \
	__typeof__(*(ptr)) __old = (old); \
	__typeof__(*(ptr)) __new = (new); \
	__uaccess_begin(); \
	switch (size) { \
	case 1: \
	{ \
		asm volatile("\n" \
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
			"2:\n" \
			"\t.section .fixup, \"ax\"\n" \
			"3:\tmov %3, %0\n" \
			"\tjmp 2b\n" \
			"\t.previous\n" \
			_ASM_EXTABLE(1b, 3b) \
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
			: "i" (-EFAULT), "q" (__new), "1" (__old) \
			: "memory" \
		); \
		break; \
	} \
	case 2: \
	{ \
		asm volatile("\n" \
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
			"2:\n" \
			"\t.section .fixup, \"ax\"\n" \
			"3:\tmov %3, %0\n" \
			"\tjmp 2b\n" \
			"\t.previous\n" \
			_ASM_EXTABLE(1b, 3b) \
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
			: "i" (-EFAULT), "r" (__new), "1" (__old) \
			: "memory" \
		); \
		break; \
	} \
	case 4: \
	{ \
		asm volatile("\n" \
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
			"2:\n" \
			"\t.section .fixup, \"ax\"\n" \
			"3:\tmov %3, %0\n" \
			"\tjmp 2b\n" \
			"\t.previous\n" \
			_ASM_EXTABLE(1b, 3b) \
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
			: "i" (-EFAULT), "r" (__new), "1" (__old) \
			: "memory" \
		); \
		break; \
	} \
	case 8: \
	{ \
		if (!IS_ENABLED(CONFIG_X86_64)) \
			__cmpxchg_wrong_size(); \
		\
		asm volatile("\n" \
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
			"2:\n" \
			"\t.section .fixup, \"ax\"\n" \
			"3:\tmov %3, %0\n" \
			"\tjmp 2b\n" \
			"\t.previous\n" \
			_ASM_EXTABLE(1b, 3b) \
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
			: "i" (-EFAULT), "r" (__new), "1" (__old) \
			: "memory" \
		); \
		break; \
	} \
	default: \
		__cmpxchg_wrong_size(); \
	} \
	__uaccess_end(); \
	*__uval = __old; \
	__ret; \
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
({ \
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
		__user_atomic_cmpxchg_inatomic((uval), (ptr), \
				(old), (new), sizeof(*(ptr))) : \
		-EFAULT; \
})
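
/*
 * Hypothetical sketch of a futex-style caller ("uaddr", "oldval" and
 * "newval" are placeholder names): atomically replace oldval with newval
 * at the user address and read the current value back into curval.
 * Returns 0 or -EFAULT; the caller is expected to have pagefaults
 * disabled for the "inatomic" variant.
 *
 *	u32 curval;
 *
 *	if (user_atomic_cmpxchg_inatomic(&curval, uaddr, oldval, newval))
 *		return -EFAULT;
 *	if (curval != oldval)
 *		return -EAGAIN;
 */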

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
					   unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					 unsigned n);

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
# define copy_user_diag __compiletime_warning
#endif

extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");

#undef copy_user_diag

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS

extern void
__compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()

extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()

#else

static inline void
__copy_from_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

#define __copy_to_user_overflow __copy_from_user_overflow

#endif

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();

	/*
	 * Only complain at compile time when the copied size is known to
	 * be a constant: for a constant 'n' the compiler can prove the
	 * destination object overflows and the diagnostic attached to
	 * copy_from_user_overflow() (a warning, or an error with
	 * DEBUG_STRICT_USER_COPY_CHECKS) fires at the offending call site.
	 * For a non-constant 'n' we either warn at build time that the
	 * size is not provably correct (DEBUG_STRICT_USER_COPY_CHECKS) or
	 * fall back to the runtime WARN in __copy_from_user_overflow().
	 */
	if (likely(sz < 0 || sz >= n))
		n = _copy_from_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_from_user_overflow();
	else
		__copy_from_user_overflow(sz, n);

	return n;
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	might_fault();

	/* See the comment in copy_from_user() above. */
	if (likely(sz < 0 || sz >= n))
		n = _copy_to_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_to_user_overflow();
	else
		__copy_to_user_overflow(sz, n);

	return n;
}
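
/*
 * Hypothetical usage sketch ("ubuf" is a placeholder __user pointer and
 * "struct foo" an assumed type): both helpers return the number of bytes
 * that could NOT be copied, so 0 means success.
 *
 *	struct foo kbuf;
 *
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *	if (copy_to_user(ubuf, &kbuf, sizeof(kbuf)))
 *		return -EFAULT;
 */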

#undef __copy_from_user_overflow
#undef __copy_to_user_overflow

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path;
 * the nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check beforehand.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin() __uaccess_begin()
#define user_access_end() __uaccess_end()

#define unsafe_put_user(x, ptr) \
({ \
	int __pu_err; \
	__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
	__builtin_expect(__pu_err, 0); \
})

#define unsafe_get_user(x, ptr) \
({ \
	int __gu_err; \
	unsigned long __gu_val; \
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	__builtin_expect(__gu_err, 0); \
})
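
/*
 * Hypothetical sketch of the unsafe accessors ("uptr", "lo" and "hi" are
 * placeholder names): access_ok() first, then open SMAP once around the
 * whole sequence with user_access_begin()/user_access_end(), making sure
 * the end call also runs on the error path.
 *
 *	u32 lo, hi;
 *	int err = -EFAULT;
 *
 *	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	user_access_begin();
 *	if (unsafe_get_user(lo, &uptr[0]))
 *		goto out;
 *	if (unsafe_get_user(hi, &uptr[1]))
 *		goto out;
 *	err = 0;
 * out:
 *	user_access_end();
 *	return err;
 */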

#endif /* _ASM_X86_UACCESS_H */