#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
#define set_fs(x)	(current->thread.addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(current->thread.addr_limit.seg)
#define __addr_ok(addr)						\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}
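
/*
 * Worked example (illustrative, not part of the original header): with
 * addr = ULONG_MAX - 1 and size = 8, "addr + size" wraps around to 6,
 * so a naive "addr + size > limit" test would falsely pass.  The
 * constant-size path above instead tests "addr > limit - size", which
 * cannot wrap because sizeof()-style constants are tiny compared to
 * the limit.  The variable-size path does add, but then catches the
 * wraparound with the "addr < size" check.
 */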

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size)					\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
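
/*
 * Usage sketch (illustrative, not part of the original header):
 * validate a user buffer once before a batch of unchecked accesses;
 * 'ubuf' and 'len' are hypothetical names:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	... __get_user()/__copy_from_user() on ubuf ...
 */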

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin()	stac()
#define __uaccess_end()		clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
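
/*
 * Illustrative examples (not part of the original header):
 * __inttype(u8) and __inttype(int) are unsigned long everywhere; on
 * 32-bit, where sizeof(u64) > sizeof(0UL), __inttype(u64) is unsigned
 * long long, so a 64-bit value still fits the temporary used by
 * get_user() and __get_user_nocheck().
 */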

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	register void *__sp asm(_ASM_SP);				\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)	\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
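
/*
 * Usage sketch (illustrative, not part of the original header):
 * fetch one int from a user pointer 'uaddr' (hypothetical name):
 *
 *	int val;
 *
 *	if (get_user(val, uaddr))
 *		return -EFAULT;
 */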

#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1: movl %%eax,0(%2)\n"				\
		     "2: movl %%edx,4(%2)\n"				\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4: movl %3,%0\n"					\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1: movl %%eax,0(%1)\n"				\
		     "2: movl %%edx,4(%1)\n"				\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)					\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)		\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %ebx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)						\
({									\
	int __ret_pu;							\
	__typeof__(*(ptr)) __pu_val;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__pu_val = x;							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_x(1, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 2:								\
		__put_user_x(2, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 4:								\
		__put_user_x(4, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 8:								\
		__put_user_x8(__pu_val, ptr, __ret_pu);			\
		break;							\
	default:							\
		/* calls the nonexistent __put_user_X(): link error */	\
		__put_user_x(X, __pu_val, ptr, __ret_pu);		\
		break;							\
	}								\
	__builtin_expect(__ret_pu, 0);					\
})
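
/*
 * Usage sketch (illustrative, not part of the original header):
 * store one u32 to a user pointer 'uaddr' (hypothetical name):
 *
 *	if (put_user(0xdeadbeef, uaddr))
 *		return -EFAULT;
 */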

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1: movl %2,%%eax\n"				\
		     "2: movl %3,%%edx\n"				\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4: mov %4,%0\n"					\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" (__m(((u32 *)(__ptr)) + 1)), \
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)	(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1: mov"itype" %2,%"rtype"1\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * Same as __get_user_asm, but the fixup does not zero the output
 * register on fault; only the error code is set.
 */
#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
	asm volatile("\n"						\
		     "1: mov"itype" %2,%"rtype"1\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1: mov"itype" %1,%"rtype"0\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	int __pu_err;							\
	__uaccess_begin();						\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin();						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1: mov"itype" %"rtype"1,%2\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1: mov"itype" %"rtype"0,%1\n"			\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
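
/*
 * Usage sketch (illustrative, not part of the original header): one
 * access_ok() check covering several unchecked accesses to the same
 * user structure; 'uarg' is a hypothetical struct foo __user *:
 *
 *	if (!access_ok(VERIFY_WRITE, uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	if (__get_user(a, &uarg->a) || __put_user(b, &uarg->b))
 *		return -EFAULT;
 */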

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
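
/*
 * Usage sketch (illustrative, not part of the original header):
 * batching several stores under one exception-handled region, in the
 * style of the signal frame setup code; 'frame' and its fields are
 * hypothetical, and 'err' accumulates any fault:
 *
 *	int err = 0;
 *
 *	put_user_try {
 *		put_user_ex(regs->ip, &frame->ip);
 *		put_user_ex(regs->sp, &frame->sp);
 *	} put_user_catch(err);
 *	if (err)
 *		return -EFAULT;
 */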

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin();						\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
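
/*
 * Usage sketch (illustrative, not part of the original header):
 * compare-and-swap a futex-style word at user address 'uaddr'
 * (hypothetical name), expecting 'oldval', installing 'newval';
 * a mismatch in 'curval' means another task raced with us:
 *
 *	u32 curval;
 *
 *	if (user_atomic_cmpxchg_inatomic(&curval, uaddr, oldval, newval))
 *		return -EFAULT;
 *	if (curval != oldval)
 *		return -EAGAIN;
 */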

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path;
 * the nested NMI paths are careful to preserve CR2.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" accessors below do no access_ok() checking and do not
 * toggle SMAP themselves: the caller must check the address range
 * first and bracket the accesses with user_access_begin() and
 * user_access_end(), which map to stac()/clac().  On a fault they
 * branch to the caller-supplied error label instead of returning an
 * error code.
 */
#define user_access_begin()	__uaccess_begin()
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, err_label)					\
do {										\
	int __pu_err;								\
	__typeof__(*(ptr)) __pu_val = (x);					\
	__put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	if (unlikely(__pu_err)) goto err_label;					\
} while (0)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
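
/*
 * Usage sketch (illustrative, not part of the original header): one
 * SMAP open/close around a batch of unsafe accesses; 'uptr', 'vals',
 * 'n' and the out_fault label are hypothetical:
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, n * sizeof(*uptr)))
 *		return -EFAULT;
 *	user_access_begin();
 *	for (i = 0; i < n; i++)
 *		unsafe_put_user(vals[i], &uptr[i], out_fault);
 *	user_access_end();
 *	return 0;
 * out_fault:
 *	user_access_end();
 *	return -EFAULT;
 */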

#endif /* _ASM_X86_UACCESS_H */