/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#define user_addr_max() (current->thread.addr_limit.seg)
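
/*
 * Usage sketch (illustrative, not part of this header): the classic
 * pattern for temporarily lifting the address limit so a kernel buffer
 * can be passed to a callee expecting a __user pointer.  The callee and
 * error handling here are hypothetical placeholders.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = vfs_read(file, (char __user *)kbuf, len, &pos);
 *	set_fs(old_fs);
 */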

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})
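
/*
 * Worked example (illustrative numbers): with addr == 0xfffffffffffffff8
 * and size == 0x10, addr + size wraps to 0x8.  Since 0x8 < size, the
 * "addr < size" test catches the overflow that a naive
 * "addr + size > limit" comparison would miss.
 */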

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that this function just checks that the pointer is in the user
 * space range - after calling this function, memory access functions may
 * still return -EFAULT.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
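
/*
 * Usage sketch (hypothetical driver code, not part of this header):
 * validate a user range once, then use the __-prefixed accessors that
 * skip the recheck.
 *
 *	if (!access_ok(ubuf, sizeof(u32)))
 *		return -EFAULT;
 *	if (__get_user(val, (u32 __user *)ubuf))
 *		return -EFAULT;
 */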

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long').
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
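
/*
 * Example (illustrative): __inttype() picks the smallest unsigned type
 * the operand fits in, so
 *
 *	__inttype(*(u16 __user *)p)	is unsigned short
 *	__inttype(*(u32 __user *)p)	is unsigned int
 *	__inttype(*(u64 __user *)p)	is unsigned long on 64-bit
 *					(unsigned long long on 32-bit)
 *
 * which keeps the inline asm operand at its natural register width.
 */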

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
		       ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
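
/*
 * Usage sketch (hypothetical syscall body, not part of this header):
 *
 *	u32 flags;
 *
 *	if (get_user(flags, (u32 __user *)arg))
 *		return -EFAULT;
 *
 * On success 'flags' holds the user value; on a fault it is zeroed and
 * -EFAULT is returned.
 */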

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
/*
 * On 32-bit, an 8-byte store is split into two 4-byte moves, with the
 * value held in the %eax:%edx pair ("A" constraint).
 */
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label)			\
	__put_user_goto(x, ptr, "q", "er", label)
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %ebx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		/* nonexistent __put_user_X() forces a link error for bad sizes */ \
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
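
/*
 * Usage sketch (hypothetical ioctl handler, not part of this header):
 *
 *	if (put_user(dev->version, (u32 __user *)arg))
 *		return -EFAULT;
 */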

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
/*
 * Read a 64-bit value from user space as two 4-byte loads into
 * %eax:%edx; on a fault, both halves are zeroed and -EFAULT returned.
 */
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %[lowbits],%%eax\n"		\
		     "2:	movl %[highbits],%%edx\n"		\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %[efault],%[errout]\n"		\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       [efault] "i" (-EFAULT), "0" (retval));		\
})
#else
#define __get_user_asm_u64(x, ptr, retval)				\
	__get_user_asm(x, ptr, retval, "q", "=r")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		/* use a u8 temp so "=q" always sees a byte-sized lvalue */ \
		__get_user_asm(x_u8__, ptr, retval, "b", "=q");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, ltype)			\
	asm volatile("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %[efault],%[errout]\n"		\
		     "	xor"itype" %[output],%[output]\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : [errout] "=r" (err),				\
		       [output] ltype(x)				\
		     : [umem] "m" (__m(addr)),				\
		       [efault] "i" (-EFAULT), "0" (err))
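
/*
 * Illustrative expansion (4-byte case, registers schematic):
 * __get_user_asm(x, ptr, err, "l", "=r") emits roughly
 *
 *	1:	movl (ptr),x
 *	2:
 *	.section .fixup
 *	3:	mov $-EFAULT,err
 *		xorl x,x
 *		jmp 2b
 *	.previous
 *
 * with an extable entry mapping a fault at 1: to the fixup at 3:.
 */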

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__label__ __pu_label;						\
	int __pu_err = -EFAULT;						\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(ptr) __pu_ptr = (ptr);				\
	__typeof__(size) __pu_size = (size);				\
	__uaccess_begin();						\
	__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);	\
	__pu_err = 0;							\
__pu_label:								\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__typeof__(ptr) __gu_ptr = (ptr);				\
	__typeof__(size) __gu_size = (size);				\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)		\
	asm_volatile_goto("\n"					\
		"1:	mov"itype" %0,%1\n"			\
		_ASM_EXTABLE_UA(1b, %l2)			\
		: : ltype(x), "m" (__m(addr))			\
		: : label)

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
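
/*
 * Usage sketch (hypothetical, not part of this header): after a single
 * access_ok() check, the unchecked variants avoid revalidating the
 * pointer on every access.
 *
 *	if (!access_ok(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	if (__get_user(lo, uptr) || __get_user(hi, uptr + 1))
 *		return -EFAULT;
 */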

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;
	__uaccess_begin_nospec();
	return true;
}
#define user_access_begin(a, b)	user_access_begin(a, b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	if (unlikely(__gu_err)) goto err_label;				\
} while (0)
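
/*
 * Usage sketch (hypothetical, not part of this header): batched writes
 * under a single SMAP open/close window.
 *
 *	if (!user_access_begin(uaddr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_put_user(a, &uaddr[0], efault);
 *	unsafe_put_user(b, &uaddr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */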

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)			\
	while (len >= sizeof(type)) {					\
		unsafe_put_user(*(type *)(src), (type __user *)(dst), label); \
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

#define unsafe_copy_to_user(_dst, _src, _len, label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
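
/*
 * Example (illustrative): for _len == 15 the descending-size loops above
 * emit one u64, one u32, one u16 and one u8 store (8 + 4 + 2 + 1 bytes),
 * so any length is covered with at most four differently sized writes.
 */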

#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)
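
/*
 * Usage sketch (hypothetical, not part of this header): a fault-tolerant
 * kernel-to-kernel read, e.g. from a crash dumper probing a possibly
 * bad pointer.
 *
 *	long val;
 *
 *	__get_kernel_nofault(&val, kptr, long, fault);
 *	pr_info("val: %lx\n", val);
 *	return 0;
 * fault:
 *	return -EFAULT;
 */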

#endif /* _ASM_X86_UACCESS_H */