#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The "fs" value (the per-thread addr_limit) determines whether address
 * range checks are performed.  With USER_DS, accesses are limited to the
 * user portion of the address space; with KERNEL_DS, the check is
 * effectively disabled.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr) \
        ((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns true ("range not ok") if it is not, i.e. if the range is
 * out of bounds or wraps around the top of the address space.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        /*
         * If we have used "sizeof()" for the size, we know it won't
         * overflow the limit (but it might overflow the 'addr', so it
         * is important to subtract the size from the limit, not add it
         * to the address).
         */
        if (__builtin_constant_p(size))
                return unlikely(addr > limit - size);

        /* Arbitrary sizes? Be careful about overflow */
        addr += size;
        if (unlikely(addr < size))
                return true;
        return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit) \
({ \
        __chk_user_ptr(addr); \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
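
/*
 * Worked example of the overflow handling above: on 64-bit, with
 * addr = 0xffffffffffffff00 and size = 0x200, "addr + size" wraps around
 * to 0x100, and the "addr < size" test catches exactly that wrap.  For a
 * compile-time-constant size the subtraction "limit - size" is used
 * instead, because a small sizeof()-sized constant cannot underflow the
 * limit, while adding it to the address might overflow.
 */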

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that this function probably just checks that the pointer is in
 * the user space range - after calling it, memory access functions may
 * still return -EFAULT.
 */
#define access_ok(type, addr, size) \
        likely(!__range_not_ok(addr, size, user_addr_max()))
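
/*
 * Typical usage (sketch only; "ubuf", "kbuf" and "len" are example names):
 * validate the whole user range once, then use the non-checking
 * __copy_from_user()/__get_user() variants on it.
 *
 *      if (!access_ok(VERIFY_READ, ubuf, len))
 *              return -EFAULT;
 *      if (__copy_from_user(kbuf, ubuf, len))
 *              return -EFAULT;
 */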

/*
 * The exception table consists of pairs of addresses relative to the
 * exception table entry itself: the first is the address of an
 * instruction that is allowed to fault, and the second is the address
 * at which the program should continue.  No registers are modified,
 * so it is entirely up to the continuation code to figure out what to
 * do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
        int insn, fixup;
};
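
/*
 * Both fields are offsets relative to the entry itself, so the absolute
 * addresses are recovered roughly as follows (see the helpers in
 * arch/x86/mm/extable.c):
 *
 *      insn  = (unsigned long)&e->insn  + e->insn;
 *      fixup = (unsigned long)&e->fixup + e->fixup;
 *
 * Storing 32-bit relative offsets keeps the table position independent
 * and halves its size compared to absolute 64-bit addresses.
 */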

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

extern int fixup_exception(struct pt_regs *regs);
extern int early_fixup_exception(unsigned long *ip);

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space; that must have been done previously with a separate
 * "access_ok()" call (this is used when we do multiple accesses to the
 * same area of user memory).
 */
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 */
#define get_user(x, ptr) \
({ \
        int __ret_gu; \
        register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
        __chk_user_ptr(ptr); \
        might_fault(); \
        asm volatile("call __get_user_%P3" \
                     : "=a" (__ret_gu), "=r" (__val_gu) \
                     : "0" (ptr), "i" (sizeof(*(ptr)))); \
        (x) = (__force __typeof__(*(ptr))) __val_gu; \
        __builtin_expect(__ret_gu, 0); \
})
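
/*
 * Usage sketch ("val" and "uptr" are example names only):
 *
 *      int val;
 *
 *      if (get_user(val, (int __user *)uptr))
 *              return -EFAULT;
 *
 * The fetch is a single access of the pointer's size; the macro
 * evaluates to 0 on success and -EFAULT if the address faults.
 */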
187
188#define __put_user_x(size, x, ptr, __ret_pu) \
189 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
190 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
191
192
193
194#ifdef CONFIG_X86_32
195#define __put_user_asm_u64(x, addr, err, errret) \
196 asm volatile(ASM_STAC "\n" \
197 "1: movl %%eax,0(%2)\n" \
198 "2: movl %%edx,4(%2)\n" \
199 "3: " ASM_CLAC "\n" \
200 ".section .fixup,\"ax\"\n" \
201 "4: movl %3,%0\n" \
202 " jmp 3b\n" \
203 ".previous\n" \
204 _ASM_EXTABLE(1b, 4b) \
205 _ASM_EXTABLE(2b, 4b) \
206 : "=r" (err) \
207 : "A" (x), "r" (addr), "i" (errret), "0" (err))
208
209#define __put_user_asm_ex_u64(x, addr) \
210 asm volatile(ASM_STAC "\n" \
211 "1: movl %%eax,0(%1)\n" \
212 "2: movl %%edx,4(%1)\n" \
213 "3: " ASM_CLAC "\n" \
214 _ASM_EXTABLE_EX(1b, 2b) \
215 _ASM_EXTABLE_EX(2b, 3b) \
216 : : "A" (x), "r" (addr))
217
218#define __put_user_x8(x, ptr, __ret_pu) \
219 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
220 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
221#else
222#define __put_user_asm_u64(x, ptr, retval, errret) \
223 __put_user_asm(x, ptr, retval, "q", "", "er", errret)
224#define __put_user_asm_ex_u64(x, addr) \
225 __put_user_asm_ex(x, addr, "q", "", "er")
226#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
227#endif

extern void __put_user_bad(void);

/*
 * The per-size helpers below are implemented in arch/x86/lib/putuser.S
 * and are called with a non-standard calling convention (value in
 * %eax/%edx:%eax/%rax, pointer in %ecx/%rcx), hence the fake prototypes.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be
 * assignable to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) \
({ \
        int __ret_pu; \
        __typeof__(*(ptr)) __pu_val; \
        __chk_user_ptr(ptr); \
        might_fault(); \
        __pu_val = x; \
        switch (sizeof(*(ptr))) { \
        case 1: \
                __put_user_x(1, __pu_val, ptr, __ret_pu); \
                break; \
        case 2: \
                __put_user_x(2, __pu_val, ptr, __ret_pu); \
                break; \
        case 4: \
                __put_user_x(4, __pu_val, ptr, __ret_pu); \
                break; \
        case 8: \
                __put_user_x8(__pu_val, ptr, __ret_pu); \
                break; \
        default: \
                /* references the nonexistent __put_user_X: link error */ \
                __put_user_x(X, __pu_val, ptr, __ret_pu); \
                break; \
        } \
        __builtin_expect(__ret_pu, 0); \
})
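
/*
 * Usage sketch ("status" and "uptr" are example names only):
 *
 *      if (put_user(status, (int __user *)uptr))
 *              return -EFAULT;
 *
 * As with get_user(), the store is a single access of the pointer's
 * size and the result is 0 or -EFAULT.
 */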

#define __put_user_size(x, ptr, size, retval, errret) \
do { \
        retval = 0; \
        __chk_user_ptr(ptr); \
        switch (size) { \
        case 1: \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
                break; \
        case 2: \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
                break; \
        case 4: \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
                break; \
        case 8: \
                __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
                                   errret); \
                break; \
        default: \
                __put_user_bad(); \
        } \
} while (0)

#define __put_user_size_ex(x, ptr, size) \
do { \
        __chk_user_ptr(ptr); \
        switch (size) { \
        case 1: \
                __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
                break; \
        case 2: \
                __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
                break; \
        case 4: \
                __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
                break; \
        case 8: \
                __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
                break; \
        default: \
                __put_user_bad(); \
        } \
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
        __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
        __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret) \
do { \
        retval = 0; \
        __chk_user_ptr(ptr); \
        switch (size) { \
        case 1: \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
                break; \
        case 2: \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
                break; \
        case 4: \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
                break; \
        case 8: \
                __get_user_asm_u64(x, ptr, retval, errret); \
                break; \
        default: \
                (x) = __get_user_bad(); \
        } \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
        asm volatile(ASM_STAC "\n" \
                     "1: mov"itype" %2,%"rtype"1\n" \
                     "2: " ASM_CLAC "\n" \
                     ".section .fixup,\"ax\"\n" \
                     "3: mov %3,%0\n" \
                     " xor"itype" %"rtype"1,%"rtype"1\n" \
                     " jmp 2b\n" \
                     ".previous\n" \
                     _ASM_EXTABLE(1b, 3b) \
                     : "=r" (err), ltype(x) \
                     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_size_ex(x, ptr, size) \
do { \
        __chk_user_ptr(ptr); \
        switch (size) { \
        case 1: \
                __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
                break; \
        case 2: \
                __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
                break; \
        case 4: \
                __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
                break; \
        case 8: \
                __get_user_asm_ex_u64(x, ptr); \
                break; \
        default: \
                (x) = __get_user_bad(); \
        } \
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
        asm volatile("1: mov"itype" %1,%"rtype"0\n" \
                     "2:\n" \
                     _ASM_EXTABLE_EX(1b, 2b) \
                     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size) \
({ \
        int __pu_err; \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
        __builtin_expect(__pu_err, 0); \
})

#define __get_user_nocheck(x, ptr, size) \
({ \
        int __gu_err; \
        unsigned long __gu_val; \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
        (x) = (__force __typeof__(*(ptr)))__gu_val; \
        __builtin_expect(__gu_err, 0); \
})

/*
 * A dummy "large" type so that __m() presents the target as a full-width
 * memory operand instead of a single scalar.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
        asm volatile(ASM_STAC "\n" \
                     "1: mov"itype" %"rtype"1,%2\n" \
                     "2: " ASM_CLAC "\n" \
                     ".section .fixup,\"ax\"\n" \
                     "3: mov %3,%0\n" \
                     " jmp 2b\n" \
                     ".previous\n" \
                     _ASM_EXTABLE(1b, 3b) \
                     : "=r"(err) \
                     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
        asm volatile("1: mov"itype" %"rtype"0,%1\n" \
                     "2:\n" \
                     _ASM_EXTABLE_EX(1b, 2b) \
                     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try do { \
        current_thread_info()->uaccess_err = 0; \
        stac(); \
        barrier();

#define uaccess_catch(err) \
        clac(); \
        (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be
 * assignable to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
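
/*
 * Usage sketch for the non-checking variants ("struct foo", "u", "a" and
 * "b" are made up for the example): validate the range once, then issue
 * several accesses without re-checking.
 *
 *      struct foo __user *u = ubuf;
 *
 *      if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
 *              return -EFAULT;
 *      if (__get_user(a, &u->a) || __put_user(b, &u->b))
 *              return -EFAULT;
 */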

/*
 * {get|put}_user_try and catch
 */
#define get_user_try            uaccess_try
#define get_user_catch(err)     uaccess_catch(err)

#define get_user_ex(x, ptr) do { \
        unsigned long __gue_val; \
        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
        (x) = (__force __typeof__(*(ptr)))__gue_val; \
} while (0)

#define put_user_try            uaccess_try
#define put_user_catch(err)     uaccess_catch(err)

#define put_user_ex(x, ptr) \
        __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
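
/*
 * Usage sketch ("u", "a", "b" and "err" are example names): faults inside
 * the block are recorded in uaccess_err and folded into "err" at
 * *_catch() time, so the individual *_ex() accesses carry no error code.
 *
 *      int err = 0;
 *
 *      get_user_try {
 *              get_user_ex(a, &u->a);
 *              get_user_ex(b, &u->b);
 *      } get_user_catch(err);
 *
 *      if (err)
 *              return err;
 */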

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
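
/*
 * Usage sketch ("name" and "uname" are example names): strncpy_from_user()
 * returns the string length on success, a negative error on a fault, and
 * the full buffer size if the source did not terminate within it.
 *
 *      char name[32];
 *      long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *      if (len < 0)
 *              return len;
 *      if (len == sizeof(name))
 *              return -ENAMETOOLONG;
 */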

extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
({ \
        int __ret = 0; \
        __typeof__(ptr) __uval = (uval); \
        __typeof__(*(ptr)) __old = (old); \
        __typeof__(*(ptr)) __new = (new); \
        switch (size) { \
        case 1: \
        { \
                asm volatile("\t" ASM_STAC "\n" \
                        "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
                        "2:\t" ASM_CLAC "\n" \
                        "\t.section .fixup, \"ax\"\n" \
                        "3:\tmov %3, %0\n" \
                        "\tjmp 2b\n" \
                        "\t.previous\n" \
                        _ASM_EXTABLE(1b, 3b) \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
                        : "i" (-EFAULT), "q" (__new), "1" (__old) \
                        : "memory" \
                ); \
                break; \
        } \
        case 2: \
        { \
                asm volatile("\t" ASM_STAC "\n" \
                        "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
                        "2:\t" ASM_CLAC "\n" \
                        "\t.section .fixup, \"ax\"\n" \
                        "3:\tmov %3, %0\n" \
                        "\tjmp 2b\n" \
                        "\t.previous\n" \
                        _ASM_EXTABLE(1b, 3b) \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
                        : "i" (-EFAULT), "r" (__new), "1" (__old) \
                        : "memory" \
                ); \
                break; \
        } \
        case 4: \
        { \
                asm volatile("\t" ASM_STAC "\n" \
                        "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
                        "2:\t" ASM_CLAC "\n" \
                        "\t.section .fixup, \"ax\"\n" \
                        "3:\tmov %3, %0\n" \
                        "\tjmp 2b\n" \
                        "\t.previous\n" \
                        _ASM_EXTABLE(1b, 3b) \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
                        : "i" (-EFAULT), "r" (__new), "1" (__old) \
                        : "memory" \
                ); \
                break; \
        } \
        case 8: \
        { \
                if (!IS_ENABLED(CONFIG_X86_64)) \
                        __cmpxchg_wrong_size(); \
 \
                asm volatile("\t" ASM_STAC "\n" \
                        "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
                        "2:\t" ASM_CLAC "\n" \
                        "\t.section .fixup, \"ax\"\n" \
                        "3:\tmov %3, %0\n" \
                        "\tjmp 2b\n" \
                        "\t.previous\n" \
                        _ASM_EXTABLE(1b, 3b) \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
                        : "i" (-EFAULT), "r" (__new), "1" (__old) \
                        : "memory" \
                ); \
                break; \
        } \
        default: \
                __cmpxchg_wrong_size(); \
        } \
        *__uval = __old; \
        __ret; \
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
({ \
        access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
                __user_atomic_cmpxchg_inatomic((uval), (ptr), \
                                (old), (new), sizeof(*(ptr))) : \
                -EFAULT; \
})
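
/*
 * Usage sketch ("cur", "uaddr", "old" and "new" are example names):
 * atomically replace a user word iff it still contains the expected
 * value.  0 means the cmpxchg was executed, -EFAULT means the access
 * faulted, and *uval receives the value that was actually found.
 *
 *      u32 cur;
 *
 *      if (user_atomic_cmpxchg_inatomic(&cur, uaddr, old, new))
 *              return -EFAULT;
 *      if (cur != old)
 *              return -EAGAIN;         (someone else won the race; retry)
 */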

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
        int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
                                           unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
                                         unsigned n);

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
# define copy_user_diag __compiletime_warning
#endif

extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");

#undef copy_user_diag

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS

extern void
__compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()

extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()

#else

static inline void
__copy_from_user_overflow(int size, unsigned long count)
{
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

#define __copy_to_user_overflow __copy_from_user_overflow

#endif

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        int sz = __compiletime_object_size(to);

        might_fault();

        /*
         * The compile-time diagnostics are restricted to the cases the
         * compiler can actually prove wrong: a copy whose constant size
         * exceeds the known size of the destination object.  For
         * non-constant sizes we fall back to a run-time WARN instead,
         * so that call sites the compiler cannot reason about do not
         * produce false-positive warnings.
         */
        if (likely(sz < 0 || sz >= n))
                n = _copy_from_user(to, from, n);
        else if (__builtin_constant_p(n))
                copy_from_user_overflow();
        else
                __copy_from_user_overflow(sz, n);

        return n;
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        int sz = __compiletime_object_size(from);

        might_fault();

        /* See the comment in copy_from_user() above. */
        if (likely(sz < 0 || sz >= n))
                n = _copy_to_user(to, from, n);
        else if (__builtin_constant_p(n))
                copy_to_user_overflow();
        else
                __copy_to_user_overflow(sz, n);

        return n;
}
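
/*
 * Usage sketch ("kbuf", "ubuf" and "len" are example names): both copy
 * routines return the number of bytes that could NOT be copied, so 0
 * means success and callers normally turn any remainder into -EFAULT.
 *
 *      if (copy_from_user(kbuf, ubuf, len))
 *              return -EFAULT;
 *
 *      if (copy_to_user(ubuf, kbuf, len))
 *              return -EFAULT;
 */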

#undef __copy_from_user_overflow
#undef __copy_to_user_overflow

#endif /* _ASM_X86_UACCESS_H */