#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define __addr_ok(addr)					\
	((unsigned long __force)(addr) <		\
	 (current_thread_info()->addr_limit.seg))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
 *
 * This needs 33-bit (65-bit for x86_64) arithmetic.  We rely on the
 * carry flag to detect the wrap-around.
 */
#define __range_not_ok(addr, size)					\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
	    : "=&r" (flag), "=r" (roksum)				\
	    : "1" (addr), "g" ((long)(size)),				\
	      "rm" (current_thread_info()->addr_limit.seg));		\
	flag;								\
})
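
/*
 * Illustrative sketch (not part of this header): the asm sequence above
 * computes, without overflowing, the same result as the C below.  The
 * function name is hypothetical.
 */
#if 0
static inline unsigned long range_not_ok_c(unsigned long addr,
					   unsigned long size,
					   unsigned long limit)
{
	unsigned long sum = addr + size;

	/* nonzero if addr+size wrapped around, or if the end is past limit */
	return (sum < addr || sum > limit) ? -1UL : 0;
}
#endif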

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
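
/*
 * Usage sketch (illustrative only; the helper below is hypothetical):
 * validate the whole user range once, then use the unchecked copy.
 */
#if 0
static long example_check_and_copy(void *dst, const void __user *src,
				   unsigned long len)
{
	if (!access_ok(VERIFY_READ, src, len))
		return -EFAULT;
	/* __copy_from_user() returns the number of bytes NOT copied */
	return __copy_from_user(dst, src, len) ? -EFAULT : 0;
}
#endif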

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
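
/*
 * Conceptual sketch of how the fault handler consumes the table.  The
 * real fixup_exception() lives in arch/x86/mm/extable.c and also handles
 * the delta-encoded uaccess entries; this outline is illustrative and
 * the function name below is hypothetical.
 */
#if 0
int fixup_exception_sketch(struct pt_regs *regs)
{
	const struct exception_table_entry *e;

	e = search_exception_tables(regs->ip);	/* sorted lookup by insn */
	if (!e)
		return 0;			/* genuine kernel fault */
	regs->ip = e->fixup;			/* resume at the fixup stub */
	return 1;
}
#endif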

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_x(size, ret, x, ptr)				\
	asm volatile("call __get_user_" #size			\
		     : "=a" (ret), "=d" (x)			\
		     : "0" (ptr))

/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#ifdef CONFIG_X86_32
/* No 8-byte helper on 32-bit: "X" forces a link error via __get_user_X */
#define __get_user_8(__ret_gu, __val_gu, ptr)				\
		__get_user_x(X, __ret_gu, __val_gu, ptr)
#else
#define __get_user_8(__ret_gu, __val_gu, ptr)				\
		__get_user_x(8, __ret_gu, __val_gu, ptr)
#endif

#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	unsigned long __val_gu;						\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 2:								\
		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 4:								\
		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 8:								\
		__get_user_8(__ret_gu, __val_gu, ptr);			\
		break;							\
	default:							\
		__get_user_x(X, __ret_gu, __val_gu, ptr);		\
		break;							\
	}								\
	(x) = (__typeof__(*(ptr)))__val_gu;				\
	__ret_gu;							\
})
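
/*
 * Usage sketch (illustrative only; the function below is not part of
 * this header): fetch one scalar from a __user pointer and propagate
 * the error.
 */
#if 0
static long example_read_int(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* 0 on success, -EFAULT on fault */
		return -EFAULT;
	return val;
}
#endif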

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:\n"						\
		     _ASM_EXTABLE(1b, 2b - 1b)				\
		     _ASM_EXTABLE(2b, 3b - 2b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * The fixed-size __put_user_N() helpers are implemented in assembly
 * (arch/x86/lib/putuser.S): the value arrives in %eax (%eax:%edx for
 * 8-byte stores on 32-bit) and the user pointer in %ecx, matching the
 * constraints used by __put_user_x() above.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

#ifdef CONFIG_X86_WP_WORKS_OK

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)						\
({									\
	int __ret_pu;							\
	__typeof__(*(ptr)) __pu_val;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__pu_val = x;							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_x(1, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 2:								\
		__put_user_x(2, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 4:								\
		__put_user_x(4, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 8:								\
		__put_user_x8(__pu_val, ptr, __ret_pu);			\
		break;							\
	default:							\
		__put_user_x(X, __pu_val, ptr, __ret_pu);		\
		break;							\
	}								\
	__ret_pu;							\
})
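
/*
 * Usage sketch (illustrative only; the function below is not part of
 * this header):
 */
#if 0
static long example_write_int(int __user *uptr, int val)
{
	return put_user(val, uptr);	/* 0 on success, -EFAULT on fault */
}
#endif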

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#else

/*
 * On a 386 the WP bit is not honoured in supervisor mode, so the plain
 * store cannot rely on the fault path to catch write-protected user
 * pages; fall back to __copy_to_user_ll(), which checks by hand.
 */
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	__typeof__(*(ptr)) __pus_tmp = x;				\
	retval = 0;							\
									\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
		retval = errret;					\
} while (0)

#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pus_tmp = x;			\
	__ret_pu = 0;						\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
				       sizeof(*(ptr))) != 0))	\
		__ret_pu = -EFAULT;				\
	__ret_pu;						\
})
#endif

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)	(x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

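/*
 * In the helpers below, "itype" is the instruction size suffix (b/w/l/q),
 * "rtype" is the register-operand modifier (e.g. "k" for the 32-bit view
 * of a register), and "ltype" is the gcc constraint for the value
 * ("=q"/"=r" for loads, "iq"/"ir"/"er" for stores).
 */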
#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

/*
 * The _ex variants encode the fixup as a small delta from the faulting
 * instruction rather than an absolute address; fixup_exception() treats
 * such entries specially, setting thread_info->uaccess_err and resuming
 * at the next instruction (see uaccess_try/uaccess_catch below).
 */
#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE(1b, 2b - 1b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__pu_err;						\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE(1b, 2b - 1b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	int prev_err = current_thread_info()->uaccess_err;		\
	current_thread_info()->uaccess_err = 0;				\
	barrier();

#define uaccess_catch(err)						\
	(err) |= current_thread_info()->uaccess_err;			\
	current_thread_info()->uaccess_err = prev_err;			\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
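
/*
 * Usage sketch (illustrative only; the helper is hypothetical): one
 * access_ok() check amortized over several __get_user() calls.
 */
#if 0
static long example_sum_pair(const int __user *uarr)
{
	int a, b;

	if (!access_ok(VERIFY_READ, uarr, 2 * sizeof(int)))
		return -EFAULT;
	if (__get_user(a, &uarr[0]) || __get_user(b, &uarr[1]))
		return -EFAULT;
	return a + b;
}
#endif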

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)
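
/*
 * Usage sketch (illustrative only), batching several fetches under one
 * try/catch region in the style of the x86 signal code; the helper and
 * its arguments are hypothetical:
 */
#if 0
static int example_restore_pair(const u32 __user *a, const u32 __user *b,
				u32 *x, u32 *y)
{
	int err = 0;

	get_user_try {
		get_user_ex(*x, a);
		get_user_ex(*y, b);
	} get_user_catch(err);

	return err;
}
#endif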

#ifdef CONFIG_X86_WP_WORKS_OK

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#else /* !CONFIG_X86_WP_WORKS_OK */

#define put_user_try		do {		\
	int __uaccess_err = 0;

#define put_user_catch(err)			\
	(err) |= __uaccess_err;			\
} while (0)

#define put_user_ex(x, ptr)	do {		\
	__uaccess_err |= __put_user(x, ptr);	\
} while (0)

#endif /* CONFIG_X86_WP_WORKS_OK */

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include "uaccess_32.h"
#else
# include "uaccess_64.h"
#endif

#endif /* _ASM_X86_UACCESS_H */