1#ifndef _ASM_M32R_UACCESS_H
2#define _ASM_M32R_UACCESS_H
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/errno.h>
15#include <linux/thread_info.h>
16#include <asm/page.h>
17#include <asm/setup.h>
18
/*
 * Historic first argument to access_ok(); kept for API compatibility.
 * Neither access_ok() variant below actually inspects it.
 */
#define VERIFY_READ 0
#define VERIFY_WRITE 1
21
22
23
24
25
26
27
28
29
/* Build an mm_segment_t (a single-member struct whose .seg field is read
 * by segment_eq()/__addr_ok() below) from a raw address-limit value. */
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })

#ifdef CONFIG_MMU

/*
 * With an MMU, kernel accesses may span the whole 32-bit space while
 * user accesses are limited to addresses below PAGE_OFFSET.  The
 * current limit is per-task state in thread_info, switched with
 * set_fs().
 */
#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
39
40#else
41
/* No MMU: there is no kernel/user address separation, so both segments
 * cover the entire 4GB space. */
#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS MAKE_MM_SEG(0xFFFFFFFF)
#define get_ds() (KERNEL_DS)
45
46static inline mm_segment_t get_fs(void)
47{
48 return USER_DS;
49}
50
/* !CONFIG_MMU: the limit cannot be changed, so set_fs() is a no-op. */
static inline void set_fs(mm_segment_t s)
{
}
54
55#endif
56
/* Compare two segment limits, e.g. segment_eq(get_fs(), KERNEL_DS). */
#define segment_eq(a, b) ((a).seg == (b).seg)

/* True when @addr lies strictly below the current task's address limit. */
#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
61
62
63
64
65
66
67
68
69
70
/*
 * __range_ok(addr, size): evaluates to 0 when [addr, addr + size) fits
 * below the current address limit, non-zero otherwise.  Note the sense
 * is inverted relative to access_ok().
 *
 * Implemented branch-free: addx sets cbit when addr + size wraps past
 * 2^32; cmpu then tests addr_limit against the sum; the two subx
 * (subtract-with-borrow) instructions fold both conditions into
 * @flag, which stays 0 only when neither the wrap nor the limit
 * violation occurred.  The condition bit is explicitly clobbered.
 */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	asm ( \
		" cmpu %1, %1 ; clear cbit\n" \
		" addx %1, %3 ; set cbit if overflow\n" \
		" subx %0, %0\n" \
		" cmpu %4, %1\n" \
		" subx %0, %5\n" \
		: "=&r" (flag), "=r" (roksum) \
		: "1" (addr), "r" ((int)(size)), \
		  "r" (current_thread_info()->addr_limit.seg), "r" (0) \
		: "cbit" ); \
	flag; })
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
#ifdef CONFIG_MMU
/*
 * access_ok(): non-zero when the user range is valid for access.
 * @type (VERIFY_READ/VERIFY_WRITE) is ignored; __range_ok() yields 0
 * on success, hence the == 0.
 */
#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
#else
/* !MMU: valid means the range lies inside [memory_start, memory_end). */
static inline int access_ok(int type, const void *addr, unsigned long size)
{
	unsigned long val = (unsigned long)addr;

	/*
	 * NOTE(review): (val + size) can wrap for very large @size, and
	 * the upper bound uses '<' rather than '<='.  Presumably
	 * memory_end is exclusive and sizes are bounded on !MMU --
	 * verify against the platform's memory_start/memory_end setup.
	 */
	return ((val >= memory_start) && ((val + size) < memory_end));
}
#endif
116
117
118
119
120
121
122
123
124
125
126
127
128
129
/*
 * Exception table entry: @insn is the address of an instruction that
 * may fault on a user access, @fixup is the address to resume at.
 * The __ex_table sections emitted by the asm macros below populate
 * the table.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Look up the faulting address in the table and apply its fixup;
 * returns non-zero when a fixup was found (implemented out of line). */
extern int fixup_exception(struct pt_regs *regs);
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
/*
 * get_user(x, ptr): fetch a simple variable from user space, with
 * access_ok() checking.  Returns 0 on success or -EFAULT, in which
 * case @x is set from a zeroed temporary (see __get_user_check()).
 * Only 1, 2 and 4 byte loads are supported.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
/*
 * put_user(x, ptr): store a simple variable to user space, with
 * access_ok() checking.  Returns 0 on success or -EFAULT.  1, 2, 4
 * and 8 byte stores are supported (see __put_user_size()).
 */
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
/*
 * __get_user(): like get_user() but without the access_ok() check --
 * the caller must already have validated the range.
 */
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

#define __get_user_nocheck(x, ptr, size) \
({ \
	long __gu_err = 0; \
	/* Pre-zeroed so (x) is intended to see 0, not stale data, when \
	 * the load faults. */ \
	unsigned long __gu_val = 0; \
	might_fault(); \
	__get_user_size(__gu_val, (ptr), (size), __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})
228
#define __get_user_check(x, ptr, size) \
({ \
	/* Defaults to -EFAULT; only the asm (via its fixup path or a \
	 * successful load) updates it once access_ok() passes. */ \
	long __gu_err = -EFAULT; \
	unsigned long __gu_val = 0; \
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	might_fault(); \
	if (access_ok(VERIFY_READ, __gu_addr, size)) \
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

/* Deliberately undefined: referencing it makes an unsupported access
 * size a link-time error instead of silent miscompilation. */
extern long __get_user_bad(void);
242
/*
 * Dispatch a user-space load on @size.  Only 1, 2 and 4 bytes are
 * supported ("ub"/"uh" select unsigned byte/halfword loads); any other
 * size becomes a link-time error via __get_user_bad().  Note there is
 * no 64-bit load counterpart to __put_user_u64().
 */
#define __get_user_size(x, ptr, size, retval) \
do { \
	retval = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: __get_user_asm(x, ptr, retval, "ub"); break; \
	case 2: __get_user_asm(x, ptr, retval, "uh"); break; \
	case 4: __get_user_asm(x, ptr, retval, ""); break; \
	default: (x) = __get_user_bad(); \
	} \
} while (0)
254
/*
 * One fault-handled user load.  @itype selects the ld suffix: "ub"/"uh"
 * or "" for a full 32-bit word.  If the load at label 1 faults, the
 * .fixup stub at label 3 writes -EFAULT into @err and jumps back to
 * label 2 (just past the load); the __ex_table entry (1b -> 3b) is what
 * routes the fault handler to the stub.  r14 is scratch for the fixup
 * jump, hence the clobber.
 */
#define __get_user_asm(x, addr, err, itype) \
	__asm__ __volatile__( \
		" .fillinsn\n" \
		"1: ld"itype" %1,@%2\n" \
		" .fillinsn\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		" .balign 4\n" \
		"3: ldi %0,%3\n" \
		" seth r14,#high(2b)\n" \
		" or3 r14,r14,#low(2b)\n" \
		" jmp r14\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .balign 4\n" \
		" .long 1b,3b\n" \
		".previous" \
		: "=&r" (err), "=&r" (x) \
		: "r" (addr), "i" (-EFAULT), "0" (err) \
		: "r14", "memory")
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
/*
 * __put_user(): like put_user() but without the access_ok() check --
 * the caller must already have validated the range.
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __put_user_nocheck(x, ptr, size) \
({ \
	/* __put_user_size() unconditionally sets this to 0 first, then \
	 * the fixup path overwrites it with -EFAULT on a fault. */ \
	long __pu_err; \
	might_fault(); \
	__put_user_size((x), (ptr), (size), __pu_err); \
	__pu_err; \
})
307
308
#define __put_user_check(x, ptr, size) \
({ \
	/* Defaults to -EFAULT; updated only if access_ok() passes. */ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	might_fault(); \
	if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
		__put_user_size((x), __pu_addr, (size), __pu_err); \
	__pu_err; \
})
318
/*
 * 64-bit user store as two 32-bit st instructions, ordered so the word
 * at the lower address matches the build endianness (%L1/%H1 are gcc's
 * low/high 32-bit halves of the 64-bit operand).  Either store may
 * fault; both __ex_table entries route to fixup 4, which sets @err to
 * -EFAULT and resumes after the second store (label 3).
 */
#if defined(__LITTLE_ENDIAN__)
#define __put_user_u64(x, addr, err) \
	__asm__ __volatile__( \
		" .fillinsn\n" \
		"1: st %L1,@%2\n" \
		" .fillinsn\n" \
		"2: st %H1,@(4,%2)\n" \
		" .fillinsn\n" \
		"3:\n" \
		".section .fixup,\"ax\"\n" \
		" .balign 4\n" \
		"4: ldi %0,%3\n" \
		" seth r14,#high(3b)\n" \
		" or3 r14,r14,#low(3b)\n" \
		" jmp r14\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .balign 4\n" \
		" .long 1b,4b\n" \
		" .long 2b,4b\n" \
		".previous" \
		: "=&r" (err) \
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err) \
		: "r14", "memory")

#elif defined(__BIG_ENDIAN__)
/* Big endian: high word goes to the lower address. */
#define __put_user_u64(x, addr, err) \
	__asm__ __volatile__( \
		" .fillinsn\n" \
		"1: st %H1,@%2\n" \
		" .fillinsn\n" \
		"2: st %L1,@(4,%2)\n" \
		" .fillinsn\n" \
		"3:\n" \
		".section .fixup,\"ax\"\n" \
		" .balign 4\n" \
		"4: ldi %0,%3\n" \
		" seth r14,#high(3b)\n" \
		" or3 r14,r14,#low(3b)\n" \
		" jmp r14\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .balign 4\n" \
		" .long 1b,4b\n" \
		" .long 2b,4b\n" \
		".previous" \
		: "=&r" (err) \
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err) \
		: "r14", "memory")
#else
#error no endian defined
#endif
371
/* Deliberately undefined: referencing it makes an unsupported access
 * size a link-time error instead of silent miscompilation. */
extern void __put_user_bad(void);

/*
 * Dispatch a user-space store on @size.  @retval is cleared up front
 * and becomes -EFAULT via the exception fixup on a faulting store.
 * 64-bit stores are split into two 32-bit stores by __put_user_u64().
 *
 * Fix vs. original: the 8-byte case cast used __typeof__(*ptr) with an
 * unparenthesized macro argument; an argument such as "p + 1" would
 * have expanded to *p + 1.  Now parenthesized like every other use.
 */
#define __put_user_size(x, ptr, size, retval) \
do { \
	retval = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: __put_user_asm(x, ptr, retval, "b"); break; \
	case 2: __put_user_asm(x, ptr, retval, "h"); break; \
	case 4: __put_user_asm(x, ptr, retval, ""); break; \
	case 8: __put_user_u64((__typeof__(*(ptr)))(x), (ptr), retval); break;\
	default: __put_user_bad(); \
	} \
} while (0)
386
/*
 * Conventional helper for telling gcc an asm touches an arbitrarily
 * large user buffer (via an "m" (__m(addr)) operand).  Not referenced
 * by the visible macros in this file, which use "memory" clobbers
 * instead.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
389
390
391
392
393
394
/*
 * One fault-handled user store.  @itype selects the st suffix: "b"/"h"
 * or "" for a full 32-bit word.  If the store at label 1 faults, the
 * .fixup stub at label 3 writes -EFAULT into @err and jumps back to
 * label 2; the __ex_table entry (1b -> 3b) routes the fault handler to
 * the stub.  r14 is scratch for the fixup jump, hence the clobber.
 */
#define __put_user_asm(x, addr, err, itype) \
	__asm__ __volatile__( \
		" .fillinsn\n" \
		"1: st"itype" %1,@%2\n" \
		" .fillinsn\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		" .balign 4\n" \
		"3: ldi %0,%3\n" \
		" seth r14,#high(2b)\n" \
		" or3 r14,r14,#low(2b)\n" \
		" jmp r14\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .balign 4\n" \
		" .long 1b,3b\n" \
		".previous" \
		: "=&r" (err) \
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err) \
		: "r14", "memory")
415
416
417
418
419
420
421
422
423
424
425
426
427
428
/*
 * __copy_user(to, from, size): copy @size bytes with exception fixups.
 * On return the (lvalue) @size argument holds the number of bytes that
 * were NOT copied (0 on full success).
 *
 * If either address is misaligned (the OR of both tested against 3)
 * everything is copied bytewise at label 2; otherwise %3 (= size/4)
 * words are moved with ld/st at labels 0/1, then the 0-3 tail bytes
 * bytewise.  On a word-phase fault, fixups 5/6 reconstruct the
 * residual byte count (remaining words * 4 + tail) into @size before
 * resuming at 9; byte-phase faults (2b/3b) resume at 9 directly with
 * @size already correct.
 *
 * NOTE(review): the "beq %0, %1, 9f" guard skips the copy entirely
 * when to == from, leaving @size untouched -- callers then see the
 * whole range as uncopied.  Verify this aliasing shortcut is intended.
 */
#define __copy_user(to, from, size) \
do { \
	unsigned long __dst, __src, __c; \
	__asm__ __volatile__ ( \
		" mv r14, %0\n" \
		" or r14, %1\n" \
		" beq %0, %1, 9f\n" \
		" beqz %2, 9f\n" \
		" and3 r14, r14, #3\n" \
		" bnez r14, 2f\n" \
		" and3 %2, %2, #3\n" \
		" beqz %3, 2f\n" \
		" addi %0, #-4 ; word_copy \n" \
		" .fillinsn\n" \
		"0: ld r14, @%1+\n" \
		" addi %3, #-1\n" \
		" .fillinsn\n" \
		"1: st r14, @+%0\n" \
		" bnez %3, 0b\n" \
		" beqz %2, 9f\n" \
		" addi %0, #4\n" \
		" .fillinsn\n" \
		"2: ldb r14, @%1 ; byte_copy \n" \
		" .fillinsn\n" \
		"3: stb r14, @%0\n" \
		" addi %1, #1\n" \
		" addi %2, #-1\n" \
		" addi %0, #1\n" \
		" bnez %2, 2b\n" \
		" .fillinsn\n" \
		"9:\n" \
		".section .fixup,\"ax\"\n" \
		" .balign 4\n" \
		"5: addi %3, #1\n" \
		" addi %1, #-4\n" \
		" .fillinsn\n" \
		"6: slli %3, #2\n" \
		" add %2, %3\n" \
		" addi %0, #4\n" \
		" .fillinsn\n" \
		"7: seth r14, #high(9b)\n" \
		" or3 r14, r14, #low(9b)\n" \
		" jmp r14\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .balign 4\n" \
		" .long 0b,6b\n" \
		" .long 1b,5b\n" \
		" .long 2b,9b\n" \
		" .long 3b,9b\n" \
		".previous\n" \
		: "=&r" (__dst), "=&r" (__src), "=&r" (size), \
		  "=&r" (__c) \
		: "0" (to), "1" (from), "2" (size), "3" (size / 4) \
		: "r14", "memory"); \
} while (0)
485
/*
 * Like __copy_user(), but on a fault the remainder of the destination
 * is zero-filled (fixup labels 7/8) before resuming -- the variant
 * used when copying FROM user space, so kernel buffers never hold
 * stale data after a partial copy.
 *
 * NOTE(review): the zero-fill loop at label 8 decrements %2 (the
 * residual count bound to @size) all the way to 0, so after a fault
 * the caller sees @size == 0, i.e. an apparently successful copy.
 * That defeats copy_from_user()'s error reporting; compare with
 * __copy_user() above and verify against mainline's later usercopy
 * rework before relying on the return value here.
 */
#define __copy_user_zeroing(to, from, size) \
do { \
	unsigned long __dst, __src, __c; \
	__asm__ __volatile__ ( \
		" mv r14, %0\n" \
		" or r14, %1\n" \
		" beq %0, %1, 9f\n" \
		" beqz %2, 9f\n" \
		" and3 r14, r14, #3\n" \
		" bnez r14, 2f\n" \
		" and3 %2, %2, #3\n" \
		" beqz %3, 2f\n" \
		" addi %0, #-4 ; word_copy \n" \
		" .fillinsn\n" \
		"0: ld r14, @%1+\n" \
		" addi %3, #-1\n" \
		" .fillinsn\n" \
		"1: st r14, @+%0\n" \
		" bnez %3, 0b\n" \
		" beqz %2, 9f\n" \
		" addi %0, #4\n" \
		" .fillinsn\n" \
		"2: ldb r14, @%1 ; byte_copy \n" \
		" .fillinsn\n" \
		"3: stb r14, @%0\n" \
		" addi %1, #1\n" \
		" addi %2, #-1\n" \
		" addi %0, #1\n" \
		" bnez %2, 2b\n" \
		" .fillinsn\n" \
		"9:\n" \
		".section .fixup,\"ax\"\n" \
		" .balign 4\n" \
		"5: addi %3, #1\n" \
		" addi %1, #-4\n" \
		" .fillinsn\n" \
		"6: slli %3, #2\n" \
		" add %2, %3\n" \
		" addi %0, #4\n" \
		" .fillinsn\n" \
		"7: ldi r14, #0 ; store zero \n" \
		" .fillinsn\n" \
		"8: addi %2, #-1\n" \
		" stb r14, @%0 ; ACE? \n" \
		" addi %0, #1\n" \
		" bnez %2, 8b\n" \
		" seth r14, #high(9b)\n" \
		" or3 r14, r14, #low(9b)\n" \
		" jmp r14\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .balign 4\n" \
		" .long 0b,6b\n" \
		" .long 1b,5b\n" \
		" .long 2b,7b\n" \
		" .long 3b,7b\n" \
		".previous\n" \
		: "=&r" (__dst), "=&r" (__src), "=&r" (size), \
		  "=&r" (__c) \
		: "0" (to), "1" (from), "2" (size), "3" (size / 4) \
		: "r14", "memory"); \
} while (0)
548
549
550
551
552
/*
 * Unchecked copy from user space.  __copy_user_zeroing() updates @n in
 * place (it is bound as an asm output), so the return value is the
 * number of bytes left uncopied; the destination tail is zero-filled
 * on a fault (see the NOTE at __copy_user_zeroing() about the residual
 * count).
 */
static inline unsigned long __generic_copy_from_user_nocheck(void *to,
	const void __user *from, unsigned long n)
{
	__copy_user_zeroing(to, from, n);
	return n;
}

/*
 * Unchecked copy to user space.  Returns the number of bytes that
 * could not be copied (0 on success).
 */
static inline unsigned long __generic_copy_to_user_nocheck(void __user *to,
	const void *from, unsigned long n)
{
	__copy_user(to, from, n);
	return n;
}

/* Checked variants (with access_ok()), implemented out of line. */
unsigned long __generic_copy_to_user(void __user *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long);
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
/*
 * __copy_to_user(): copy into user space without access_ok() checking;
 * the caller must have validated the range.  Returns the number of
 * bytes that could not be copied (0 on success).
 */
#define __copy_to_user(to, from, n) \
	__generic_copy_to_user_nocheck((to), (from), (n))

/*
 * NOTE(review): __copy_from_user_inatomic aliases __copy_from_user,
 * which zero-fills the destination on a fault; callers expecting the
 * usual "no zeroing in _inatomic" convention should confirm this is
 * intended.
 */
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
/*
 * copy_to_user(): checked copy into user space.  May fault and sleep
 * (hence might_fault()); returns the number of bytes that could not
 * be copied, 0 on success.
 */
#define copy_to_user(to, from, n) \
({ \
	might_fault(); \
	__generic_copy_to_user((to), (from), (n)); \
})
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
/*
 * __copy_from_user(): copy from user space without access_ok()
 * checking; the caller must have validated the range.  The destination
 * tail is zero-filled on a fault; returns the number of bytes not
 * copied.
 */
#define __copy_from_user(to, from, n) \
	__generic_copy_from_user_nocheck((to), (from), (n))
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
/*
 * copy_from_user(): checked copy from user space.  May fault and
 * sleep (hence might_fault()); returns the number of bytes that could
 * not be copied, 0 on success, via the out-of-line
 * __generic_copy_from_user().
 */
#define copy_from_user(to, from, n) \
({ \
	might_fault(); \
	__generic_copy_from_user((to), (from), (n)); \
})
653
/*
 * Copy a NUL-terminated string from user space (checked and unchecked
 * variants); implemented out of line.  Conventionally returns the
 * length of the copied string or -EFAULT on a fault.
 */
long __must_check strncpy_from_user(char *dst, const char __user *src,
	long count);
long __must_check __strncpy_from_user(char *dst,
	const char __user *src, long count);
658
659
660
661
662
663
664
665
666
667
668
669
/* Zero @len bytes of user memory without access_ok() checking;
 * conventionally returns the number of bytes that could not be
 * cleared (implemented out of line). */
unsigned long __clear_user(void __user *mem, unsigned long len);
671
672
673
674
675
676
677
678
679
680
681
682
/* Checked variant of __clear_user(); conventionally returns the number
 * of bytes that could not be cleared (implemented out of line). */
unsigned long clear_user(void __user *mem, unsigned long len);
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
/*
 * strnlen_user(): length of a user string, bounded by @n; implemented
 * out of line (conventionally includes the terminating NUL and returns
 * 0 on fault).  strlen_user() uses the largest positive long as bound.
 */
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
long strnlen_user(const char __user *str, long n);
702
703#endif
704