/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, NON
 *   INFRINGEMENT.  See the GNU General Public License for more
 *   details.
 */

#ifndef _ASM_TILE_UACCESS_H
#define _ASM_TILE_UACCESS_H

/*
 * User space memory access functions.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)
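
/*
 * Usage sketch (editorial addition, not part of the original header):
 * the classic save/override/restore pattern for the address limit, so
 * the user-access routines temporarily accept kernel addresses.  The
 * function name and argument are hypothetical.
 */
#if 0	/* illustration only */
static int example_kernel_read(int *kptr)
{
	mm_segment_t old_fs = get_fs();		/* save current limit */
	int val, ret;

	set_fs(KERNEL_DS);			/* accept kernel addresses */
	ret = get_user(val, (int __user *)kptr);
	set_fs(old_fs);				/* always restore */
	return ret ? ret : val;
}
#endif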

#ifndef __tilegx__
/*
 * On tilepro, user space may map the MEM_USER_INTRPT region; report a
 * range as arch-mappable only if it lies entirely within that window.
 */
static inline int is_arch_mappable_range(unsigned long addr,
					 unsigned long size)
{
	return (addr >= MEM_USER_INTRPT &&
		addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
		size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
}
#define is_arch_mappable_range is_arch_mappable_range
#else
#define is_arch_mappable_range(addr, size) 0
#endif

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
int __range_ok(unsigned long addr, unsigned long size);

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) ({					\
	__chk_user_ptr(addr);						\
	likely(__range_ok((unsigned long)(addr), (size)) == 0);		\
})
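
/*
 * Usage sketch (editorial addition): validate a user range once, then
 * issue unchecked __get_user() calls against it.  The function name is
 * hypothetical.
 */
#if 0	/* illustration only */
static int example_sum_two(const int __user *uarr, int *result)
{
	int a, b;

	if (!access_ok(VERIFY_READ, uarr, 2 * sizeof(int)))
		return -EFAULT;
	if (__get_user(a, uarr) || __get_user(b, uarr + 1))
		return -EFAULT;		/* faulted despite a valid range */
	*result = a + b;
	return 0;
}
#endif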

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/*
 * Support macros for __get_user().
 * Note that __get_user() and __put_user() assume proper alignment
 * of the user address.
 */

#ifdef __LP64__
#define _ASM_PTR	".quad"
#define _ASM_ALIGN	".align 8"
#else
#define _ASM_PTR	".long"
#define _ASM_ALIGN	".align 4"
#endif

#define __get_user_asm(OP, x, ptr, ret)					\
	asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"		\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "0: { movei %1, 0; movei %0, %3 }\n"		\
		     "j 9f\n"						\
		     ".section __ex_table,\"a\"\n"			\
		     _ASM_ALIGN "\n"					\
		     _ASM_PTR " 1b, 0b\n"				\
		     ".popsection\n"					\
		     "9:"						\
		     : "=r" (ret), "=r" (x)				\
		     : "r" (ptr), "i" (-EFAULT))

#ifdef __tilegx__
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
#else
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
#ifdef __LITTLE_ENDIAN
#define __lo32(a, b) a
#define __hi32(a, b) b
#else
#define __lo32(a, b) b
#define __hi32(a, b) a
#endif
#define __get_user_8(x, ptr, ret)					\
	({								\
		unsigned int __a, __b;					\
		asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n"	\
			     "2: { lw %2, %2; movei %0, 0 }\n"		\
			     ".pushsection .fixup,\"ax\"\n"		\
			     "0: { movei %1, 0; movei %2, 0 }\n"	\
			     "{ movei %0, %4; j 9f }\n"			\
			     ".section __ex_table,\"a\"\n"		\
			     ".align 4\n"				\
			     ".word 1b, 0b\n"				\
			     ".word 2b, 0b\n"				\
			     ".popsection\n"				\
			     "9:"					\
			     : "=r" (ret), "=r" (__a), "=&r" (__b)	\
			     : "r" (ptr), "i" (-EFAULT));		\
		(x) = (__force __typeof(x))(__inttype(x))		\
			(((u64)__hi32(__a, __b) << 32) |		\
			 __lo32(__a, __b));				\
	})
#endif

extern int __get_user_bad(void)
	__attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Caller must check the specified block with access_ok() before calling
 * this function.
 */
#define __get_user(x, ptr)						\
	({								\
		int __ret;						\
		typeof(x) _x;						\
		__chk_user_ptr(ptr);					\
		switch (sizeof(*(ptr))) {				\
		case 1: __get_user_1(_x, ptr, __ret); break;		\
		case 2: __get_user_2(_x, ptr, __ret); break;		\
		case 4: __get_user_4(_x, ptr, __ret); break;		\
		case 8: __get_user_8(_x, ptr, __ret); break;		\
		default: __ret = __get_user_bad(); break;		\
		}							\
		(x) = (typeof(*(ptr))) _x;				\
		__ret;							\
	})

/* Support macros for __put_user(). */

#define __put_user_asm(OP, x, ptr, ret)					\
	asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"		\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "0: { movei %0, %3; j 9f }\n"			\
		     ".section __ex_table,\"a\"\n"			\
		     _ASM_ALIGN "\n"					\
		     _ASM_PTR " 1b, 0b\n"				\
		     ".popsection\n"					\
		     "9:"						\
		     : "=r" (ret)					\
		     : "r" (ptr), "r" (x), "i" (-EFAULT))

#ifdef __tilegx__
#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
#else
#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
#define __put_user_8(x, ptr, ret)					\
	({								\
		u64 __x = (__force __inttype(x))(x);			\
		int __lo = (int) __x, __hi = (int) (__x >> 32);		\
		asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n"	\
			     "2: { sw %0, %3; movei %0, 0 }\n"		\
			     ".pushsection .fixup,\"ax\"\n"		\
			     "0: { movei %0, %4; j 9f }\n"		\
			     ".section __ex_table,\"a\"\n"		\
			     ".align 4\n"				\
			     ".word 1b, 0b\n"				\
			     ".word 2b, 0b\n"				\
			     ".popsection\n"				\
			     "9:"					\
			     : "=&r" (ret)				\
			     : "r" (ptr), "r" (__lo32(__lo, __hi)),	\
			       "r" (__hi32(__lo, __hi)), "i" (-EFAULT)); \
	})
#endif

extern int __put_user_bad(void)
	__attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be
 * assignable to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 *
 * Caller must check the specified block with access_ok() before calling
 * this function.
 */
#define __put_user(x, ptr)						\
({									\
	int __ret;							\
	typeof(*(ptr)) _x = (x);					\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: __put_user_1(_x, ptr, __ret); break;			\
	case 2: __put_user_2(_x, ptr, __ret); break;			\
	case 4: __put_user_4(_x, ptr, __ret); break;			\
	case 8: __put_user_8(_x, ptr, __ret); break;			\
	default: __ret = __put_user_bad(); break;			\
	}								\
	__ret;								\
})

/*
 * The versions of get_user and put_user without initial underscores
 * check the address of their arguments to make sure they are not
 * in kernel space.
 */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__Pu_addr = (ptr);			\
	access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?	\
		__put_user((x), (__Pu_addr)) :				\
		-EFAULT;						\
})

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) const __user *__Gu_addr = (ptr);		\
	access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?	\
		__get_user((x), (__Gu_addr)) :				\
		((x) = 0, -EFAULT);					\
})
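
/*
 * Usage sketch (editorial addition): the checked forms are what driver
 * code typically calls, e.g. from an ioctl handler.  Everything here is
 * hypothetical except get_user()/put_user() themselves.
 */
#if 0	/* illustration only */
static long example_ioctl_double(unsigned long arg)
{
	int __user *uptr = (int __user *)arg;
	int val;

	if (get_user(val, uptr))	/* validates the address itself */
		return -EFAULT;
	return put_user(val * 2, uptr);
}
#endif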

/**
 * __copy_to_user() - copy data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * An alternate version - __copy_to_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().
 */
extern unsigned long __must_check __copy_to_user_inatomic(
	void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
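
/*
 * Usage sketch (editorial addition): a typical read() handler copies a
 * kernel buffer out with copy_to_user() and treats any nonzero return
 * as a fault.  "example_buf" and the handler itself are hypothetical.
 */
#if 0	/* illustration only */
static char example_buf[64];

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	size_t avail = sizeof(example_buf);

	if (*ppos >= avail)
		return 0;			/* EOF */
	if (count > avail - *ppos)
		count = avail - *ppos;
	if (copy_to_user(buf, example_buf + *ppos, count))
		return -EFAULT;
	*ppos += count;
	return count;
}
#endif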

/**
 * __copy_from_user() - copy data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().  This version
 * does *not* pad with zeros.
 */
extern unsigned long __must_check __copy_from_user_inatomic(
	void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
	void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_from_user_zeroing(to, from, n);
}

static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
/*
 * There are still unprovable places in the generic code as of 2.6.34,
 * so this option is not really compatible with -Werror, which is more
 * useful in general.
 */
extern void copy_from_user_overflow(void)
	__compiletime_warning("copy_from_user() size is not provably correct");

static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);

	if (likely(sz == -1 || sz >= n))
		n = _copy_from_user(to, from, n);
	else
		copy_from_user_overflow();

	return n;
}
#else
#define copy_from_user _copy_from_user
#endif
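
/*
 * Usage sketch (editorial addition): the mirror-image write() handler.
 * Note the zeroing semantics above: on a partial fault the kernel
 * buffer is zero-padded, so only a zero return means full success.
 * The handler and buffer are hypothetical.
 */
#if 0	/* illustration only */
static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	char kbuf[64];

	if (count > sizeof(kbuf))
		count = sizeof(kbuf);
	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;
	/* ... consume kbuf ... */
	return count;
}
#endif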

#ifdef __tilegx__
/**
 * __copy_in_user() - copy data within user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to user space.  Caller must check
 * the specified blocks with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
extern unsigned long __copy_in_user_inatomic(
	void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = __copy_in_user(to, from, n);
	return n;
}
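
/*
 * Usage sketch (editorial addition): moving data between two user
 * buffers without a kernel bounce buffer, as compat-layer code might.
 * The wrapper is hypothetical.
 */
#if 0	/* illustration only */
static int example_user_to_user(void __user *dst, const void __user *src,
				unsigned long len)
{
	return copy_in_user(dst, src, len) ? -EFAULT : 0;
}
#endif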
#endif

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
extern long strnlen_user_asm(const char __user *str, long n);
static inline long __must_check strnlen_user(const char __user *str, long n)
{
	might_fault();
	return strnlen_user_asm(str, n);
}
#define strlen_user(str) strnlen_user(str, LONG_MAX)
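
/*
 * Usage sketch (editorial addition): sizing a user string before
 * copying it.  Remember the count includes the trailing NUL, and 0
 * signals a fault.  The wrapper and the PAGE_SIZE cap are hypothetical.
 */
#if 0	/* illustration only */
static long example_user_strlen(const char __user *ustr)
{
	long len = strnlen_user(ustr, PAGE_SIZE);

	if (len == 0)
		return -EFAULT;		/* faulted while scanning */
	return len - 1;			/* exclude the NUL */
}
#endif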

/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace,
 *                      with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the
 * trailing NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have
 * been copied).
 *
 * If @count is smaller than the length of the string, copies @count
 * bytes and returns @count.
 */
extern long strncpy_from_user_asm(char *dst, const char __user *src, long);
static inline long __must_check __strncpy_from_user(
	char *dst, const char __user *src, long count)
{
	might_fault();
	return strncpy_from_user_asm(dst, src, count);
}
static inline long __must_check strncpy_from_user(
	char *dst, const char __user *src, long count)
{
	if (access_ok(VERIFY_READ, src, 1))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}
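
/*
 * Usage sketch (editorial addition): pulling a NUL-terminated name in
 * from user space and forcing termination on truncation (a return equal
 * to @count means the NUL did not fit).  Names are hypothetical.
 */
#if 0	/* illustration only */
static long example_get_name(char *kname, long size,
			     const char __user *uname)
{
	long len = strncpy_from_user(kname, uname, size);

	if (len < 0)
		return len;		/* -EFAULT */
	if (len == size)
		kname[size - 1] = '\0';	/* truncated: terminate by hand */
	return 0;
}
#endif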

/**
 * clear_user: - Zero a block of memory in user space.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __clear_user(
	void __user *mem, unsigned long len)
{
	might_fault();
	return clear_user_asm(mem, len);
}
static inline unsigned long __must_check clear_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __clear_user(mem, len);
	return len;
}
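
/*
 * Usage sketch (editorial addition): zero-filling the unwritten tail of
 * a user buffer, e.g. after a short read.  The wrapper is hypothetical.
 */
#if 0	/* illustration only */
static int example_zero_tail(char __user *buf, size_t copied, size_t count)
{
	if (copied < count &&
	    clear_user(buf + copied, count - copied))
		return -EFAULT;
	return 0;
}
#endif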

/**
 * flush_user: - Flush a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to flush.
 *
 * Returns number of bytes that could not be flushed.
 * On success, this will be zero.
 */
extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __flush_user(
	void __user *mem, unsigned long len)
{
	int retval;

	might_fault();
	retval = flush_user_asm(mem, len);
	mb_incoherent();
	return retval;
}

static inline unsigned long __must_check flush_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __flush_user(mem, len);
	return len;
}

/**
 * finv_user: - Flush-inval a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to flush-invalidate.
 *
 * Returns number of bytes that could not be flush-invalidated.
 * On success, this will be zero.
 */
extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __finv_user(
	void __user *mem, unsigned long len)
{
	int retval;

	might_fault();
	retval = finv_user_asm(mem, len);
	mb_incoherent();
	return retval;
}
static inline unsigned long __must_check finv_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __finv_user(mem, len);
	return len;
}
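
/*
 * Usage sketch (editorial addition): these tile-specific helpers push
 * user-visible data out of the cache, e.g. before handing a user buffer
 * to a non-coherent device.  The scenario and wrapper are hypothetical.
 */
#if 0	/* illustration only */
static int example_prepare_for_device(void __user *ubuf, unsigned long len)
{
	if (flush_user(ubuf, len))	/* nonzero: part of the range failed */
		return -EFAULT;
	return 0;
}
#endif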

#endif /* _ASM_TILE_UACCESS_H */