#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
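/*
 * User space memory access functions
 */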
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1
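
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */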
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr)		\
	((unsigned long __force)(addr) < user_addr_max())
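
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */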
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
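	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */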
	if (__builtin_constant_p(size))
		return addr > limit - size;
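
	/* Arbitrary sizes? Be careful about overflow */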
	addr += size;
	if (addr < size)
		return true;
	return addr > limit;
}

#define __range_not_ok(addr, size, limit)			\
({								\
	__chk_user_ptr(addr);					\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
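
/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */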
#define access_ok(type, addr, size) \
	likely(!__range_not_ok(addr, size, user_addr_max()))
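
/*
 * The exception table consists of pairs of addresses relative to the
 * exception table entry itself: the first is the address of an
 * instruction that is allowed to fault, and the second is the address
 * at which the program should continue.  No registers are modified,
 * so it is entirely up to the continuation code to figure out what to
 * do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */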
struct exception_table_entry {
	int insn, fixup;
};

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

extern int fixup_exception(struct pt_regs *regs);
extern int early_fixup_exception(unsigned long *ip);
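
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */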
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);
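
/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */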
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
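
/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 */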
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P3"				\
		     : "=a" (__ret_gu), "=r" (__val_gu)			\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__typeof__(*(ptr))) __val_gu;				\
	__ret_gu;							\
})
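
/*
 * Typical use, a minimal sketch ("arg" stands in for any __user
 * pointer the caller holds):
 *
 *	int val;
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 */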

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile(ASM_STAC "\n"					\
		     "1: movl %%eax,0(%2)\n"				\
		     "2: movl %%edx,4(%2)\n"				\
		     "3: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "4: movl %3,%0\n"					\
		     " jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile(ASM_STAC "\n"					\
		     "1: movl %%eax,0(%1)\n"				\
		     "2: movl %%edx,4(%1)\n"				\
		     "3: " ASM_CLAC "\n"				\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);
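
/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax, clobbers %ebx.
 */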
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
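
/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */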
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		/* calls the undefined __put_user_X(): link error */ \
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__ret_pu;						\
})

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)	(x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile(ASM_STAC "\n"					\
		     "1: mov"itype" %2,%"rtype"1\n"			\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     " xor"itype" %"rtype"1,%"rtype"1\n"		\
		     " jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1: mov"itype" %1,%"rtype"0\n"			\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__pu_err;						\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

/*
 * Fake structure type: __m() casts the user pointer to this so gcc
 * treats the asm operand as a real memory reference of sufficient size.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
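
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */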
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile(ASM_STAC "\n"					\
		     "1: mov"itype" %"rtype"1,%2\n"			\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     " jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1: mov"itype" %"rtype"0,%1\n"			\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))
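
/*
 * uaccess_try and catch
 */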
#define uaccess_try	do {						\
	current_thread_info()->uaccess_err = 0;				\
	stac();								\
	barrier();

#define uaccess_catch(err)						\
	clac();								\
	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);	\
} while (0)
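
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */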
#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
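
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */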
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
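
/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */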
#define get_user_try		uaccess_try
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
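
/*
 * Atomic compare-and-exchange on a user-space value: *uval receives
 * the previous contents of *ptr, and the return value is 0 unless the
 * access faults, in which case it is -EFAULT.
 */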
#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\t" ASM_STAC "\n"				\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\t" ASM_CLAC "\n"				\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\t" ASM_STAC "\n"				\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\t" ASM_CLAC "\n"				\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\t" ASM_STAC "\n"				\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\t" ASM_CLAC "\n"				\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\t" ASM_STAC "\n"				\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\t" ASM_CLAC "\n"				\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
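
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */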
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
					   unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					 unsigned n);

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
# define copy_user_diag __compiletime_warning
#endif

extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");

#undef copy_user_diag

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS

extern void
__compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()

extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()

#else

static inline void
__copy_from_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

#define __copy_to_user_overflow __copy_from_user_overflow

#endif

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();
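
	/*
	 * If the object size is known at compile time, check the copy
	 * length against it: a provably wrong, constant length becomes
	 * a compile-time diagnostic (copy_from_user_overflow), anything
	 * else is reported through __copy_from_user_overflow().
	 */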
	if (likely(sz < 0 || sz >= n))
		n = _copy_from_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_from_user_overflow();
	else
		__copy_from_user_overflow(sz, n);

	return n;
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	might_fault();

	/* See the object-size check in copy_from_user() above. */
	if (likely(sz < 0 || sz >= n))
		n = _copy_to_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_to_user_overflow();
	else
		__copy_to_user_overflow(sz, n);

	return n;
}

#undef __copy_from_user_overflow
#undef __copy_to_user_overflow

#endif /* _ASM_X86_UACCESS_H */