/*
 * MIPS user-space memory access (uaccess) definitions.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm/asm-eva.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR ".word"
#define __UA_LA "la"
#define __UA_ADDU "addu"
#define __UA_t0 "$8"
#define __UA_t1 "$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT __ua_limit

#define __UA_ADDR ".dword"
#define __UA_LA "dla"
#define __UA_ADDU "daddu"
#define __UA_t0 "$12"
#define __UA_t1 "$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a mask of the address bits that may not be set in a valid
 * userspace address; with KERNEL_DS no address bits are restricted.
 */
#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS ((mm_segment_t) { 0x80000000UL })
#define USER_DS ((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS ((mm_segment_t) { 0UL })
#define USER_DS ((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * The address is valid if:
 *  - "addr" doesn't have any high bits set
 *  - AND "size" doesn't have any high bits set
 *  - AND "addr+size" doesn't have any high bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size) \
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *	  %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *	  to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.  Note that this only checks that the
 * pointer lies in the user address range; the access itself may still
 * fault, in which case the accessor returns -EFAULT.
 */
#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask) \
({ \
	unsigned long __addr = (unsigned long) (addr); \
	unsigned long __size = size; \
	unsigned long __mask = mask; \
	unsigned long __ok; \
	\
	__chk_user_ptr(addr); \
	__ok = (signed long)(__mask & (__addr | (__addr + __size) | \
		__ua_size(__size))); \
	__ok == 0; \
})

#define access_ok(type, addr, size) \
	likely(__access_ok((addr), (size), __access_mask))

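/*
 * Illustrative sketch only (hypothetical names, not part of this header):
 * a driver validating a user buffer once with access_ok() before using
 * the unchecked __copy_from_user() on it.
 *
 *	static int example_fetch(void *kbuf, const void __user *ubuf,
 *				 unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_READ, ubuf, len))
 *			return -EFAULT;
 *		return __copy_from_user(kbuf, ubuf, len) ? -EFAULT : 0;
 *	}
 */
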
/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value (char, int, etc., but not a
 * structure or array) from kernel space to user space.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable (char, int, etc., but not a
 * structure or array) from user space to kernel space.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))

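/*
 * Illustrative sketch only (hypothetical names): reading and writing a
 * single scalar argument, as an ioctl handler typically would.
 *
 *	static int example_double(int __user *uarg)
 *	{
 *		int val;
 *
 *		if (get_user(val, uarg))
 *			return -EFAULT;
 *		return put_user(val * 2, uarg);
 *	}
 */
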
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * Like put_user(), but the caller must first check the pointer with
 * access_ok(); no address check is done here.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * Like get_user(), but the caller must first check the pointer with
 * access_ok(); no address check is done here.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

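/*
 * Illustrative sketch only (hypothetical names): one access_ok() check
 * amortized over a loop of unchecked __get_user() calls.
 *
 *	static int example_sum(const int __user *uarr, int n, long *sum)
 *	{
 *		int i, v;
 *
 *		if (!access_ok(VERIFY_READ, uarr, n * sizeof(*uarr)))
 *			return -EFAULT;
 *		for (*sum = 0, i = 0; i < n; i++) {
 *			if (__get_user(v, uarr + i))
 *				return -EFAULT;
 *			*sum += v;
 *		}
 *		return 0;
 *	}
 */
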
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64 bit operation and one
 * for 32 bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel-specific functions for EVA.  We need to use normal load
 * instructions to read data from the kernel when operating in EVA mode.
 * These macros avoid redefining __get/put_user and __get/put_kernel.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd _loadw
#else
#define _loadd(reg, addr) "ld " reg ", " addr
#endif
#define _loadw(reg, addr) "lw " reg ", " addr
#define _loadh(reg, addr) "lh " reg ", " addr
#define _loadb(reg, addr) "lb " reg ", " addr

#define __get_kernel_common(val, size, ptr) \
do { \
	switch (size) { \
	case 1: __get_data_asm(val, _loadb, ptr); break; \
	case 2: __get_data_asm(val, _loadh, ptr); break; \
	case 4: __get_data_asm(val, _loadw, ptr); break; \
	case 8: __GET_DW(val, _loadd, ptr); break; \
	default: __get_user_unknown(); break; \
	} \
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr) \
do { \
	switch (size) { \
	case 1: __get_data_asm(val, user_lb, ptr); break; \
	case 2: __get_data_asm(val, user_lh, ptr); break; \
	case 4: __get_data_asm(val, user_lw, ptr); break; \
	case 8: __GET_DW(val, user_ld, ptr); break; \
	default: __get_user_unknown(); break; \
	} \
} while (0)

#define __get_user_nocheck(x, ptr, size) \
({ \
	/* __gu_err is set by the asm in __get_*_common() */ \
	int __gu_err; \
	\
	if (segment_eq(get_fs(), get_ds())) { \
		__get_kernel_common((x), size, ptr); \
	} else { \
		__chk_user_ptr(ptr); \
		__get_user_common((x), size, ptr); \
	} \
	__gu_err; \
})

#define __get_user_check(x, ptr, size) \
({ \
	int __gu_err = -EFAULT; \
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
	\
	might_fault(); \
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) { \
		if (segment_eq(get_fs(), get_ds())) \
			__get_kernel_common((x), size, __gu_ptr); \
		else \
			__get_user_common((x), size, __gu_ptr); \
	} else \
		(x) = 0; \
	\
	__gu_err; \
})

#define __get_data_asm(val, insn, addr) \
{ \
	long __gu_tmp; \
	\
	__asm__ __volatile__( \
	"1: "insn("%1", "%3")" \n" \
	"2: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" move %1, $0 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" "__UA_ADDR "\t1b, 3b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=r" (__gu_tmp) \
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
	\
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr) \
{ \
	union { \
		unsigned long long l; \
		__typeof__(*(addr)) t; \
	} __gu_tmp; \
	\
	__asm__ __volatile__( \
	"1: " insn("%1", "(%3)")" \n" \
	"2: " insn("%D1", "4(%3)")" \n" \
	"3: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" move %1, $0 \n" \
	" move %D1, $0 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=&r" (__gu_tmp.l) \
	: "0" (0), "r" (addr), "i" (-EFAULT)); \
	\
	(val) = __gu_tmp.t; \
}

#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel-specific functions for EVA.  We need to use normal store
 * instructions to write data to the kernel when operating in EVA mode.
 * These macros avoid redefining __get/put_user and __get/put_kernel.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored _storew
#else
#define _stored(reg, addr) "sd " reg ", " addr
#endif

#define _storew(reg, addr) "sw " reg ", " addr
#define _storeh(reg, addr) "sh " reg ", " addr
#define _storeb(reg, addr) "sb " reg ", " addr

#define __put_kernel_common(ptr, size) \
do { \
	switch (size) { \
	case 1: __put_data_asm(_storeb, ptr); break; \
	case 2: __put_data_asm(_storeh, ptr); break; \
	case 4: __put_data_asm(_storew, ptr); break; \
	case 8: __PUT_DW(_stored, ptr); break; \
	default: __put_user_unknown(); break; \
	} \
} while (0)
#endif

/*
 * Yuck.  We need two variants, one for 64 bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size) \
do { \
	switch (size) { \
	case 1: __put_data_asm(user_sb, ptr); break; \
	case 2: __put_data_asm(user_sh, ptr); break; \
	case 4: __put_data_asm(user_sw, ptr); break; \
	case 8: __PUT_DW(user_sd, ptr); break; \
	default: __put_user_unknown(); break; \
	} \
} while (0)
418
419#define __put_user_nocheck(x, ptr, size) \
420({ \
421 __typeof__(*(ptr)) __pu_val; \
422 int __pu_err = 0; \
423 \
424 __pu_val = (x); \
425 if (segment_eq(get_fs(), get_ds())) { \
426 __put_kernel_common(ptr, size); \
427 } else { \
428 __chk_user_ptr(ptr); \
429 __put_user_common(ptr, size); \
430 } \
431 __pu_err; \
432})
433
434#define __put_user_check(x, ptr, size) \
435({ \
436 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
437 __typeof__(*(ptr)) __pu_val = (x); \
438 int __pu_err = -EFAULT; \
439 \
440 might_fault(); \
441 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
442 if (segment_eq(get_fs(), get_ds())) \
443 __put_kernel_common(__pu_addr, size); \
444 else \
445 __put_user_common(__pu_addr, size); \
446 } \
447 \
448 __pu_err; \
449})

#define __put_data_asm(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1: "insn("%z2", "%3")" # __put_data_asm \n" \
	"2: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 3b \n" \
	" .previous \n" \
	: "=r" (__pu_err) \
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
	  "i" (-EFAULT)); \
}

#define __put_data_asm_ll32(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1: "insn("%2", "(%3)")" # __put_data_asm_ll32 \n" \
	"2: "insn("%D2", "4(%3)")" \n" \
	"3: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" .previous" \
	: "=r" (__pu_err) \
	: "0" (0), "r" (__pu_val), "r" (ptr), \
	  "i" (-EFAULT)); \
}

extern void __put_user_unknown(void);

/*
 * ul{b,h,w} are macros and there are no equivalent macros for EVA.
 * EVA unaligned access is handled in the ADE exception handler.
 */
#ifndef CONFIG_EVA
/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space; the destination may be unaligned.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x, ptr) \
	__put_user_unaligned_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space; the source may be unaligned.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x, ptr) \
	__get_user_unaligned_check((x), (ptr), sizeof(*(ptr)))

/*
 * __put_user_unaligned: - Write a simple value into user space, with less
 *			   checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * Like put_user_unaligned(), but the caller must first check the pointer
 * with access_ok().
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x, ptr) \
	__put_user_unaligned_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less
 *			   checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * Like get_user_unaligned(), but the caller must first check the pointer
 * with access_ok().
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x, ptr) \
	__get_user_unaligned_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * Yuck.  We need two variants, one for 64 bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
	__get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr) \
do { \
	switch (size) { \
	case 1: __get_user_unaligned_asm(val, "lb", ptr); break; \
	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \
	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
	default: __get_user_unaligned_unknown(); break; \
	} \
} while (0)

#define __get_user_unaligned_nocheck(x, ptr, size) \
({ \
	int __gu_err; \
	\
	__get_user_unaligned_common((x), size, ptr); \
	__gu_err; \
})

#define __get_user_unaligned_check(x, ptr, size) \
({ \
	int __gu_err = -EFAULT; \
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
	\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
		__get_user_unaligned_common((x), size, __gu_ptr); \
	\
	__gu_err; \
})

#define __get_user_unaligned_asm(val, insn, addr) \
{ \
	long __gu_tmp; \
	\
	__asm__ __volatile__( \
	"1: " insn " %1, %3 \n" \
	"2: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" move %1, $0 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" "__UA_ADDR "\t1b, 3b \n" \
	" "__UA_ADDR "\t1b + 4, 3b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=r" (__gu_tmp) \
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
	\
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr) \
{ \
	unsigned long long __gu_tmp; \
	\
	__asm__ __volatile__( \
	"1: ulw %1, (%3) \n" \
	"2: ulw %D1, 4(%3) \n" \
	" move %0, $0 \n" \
	"3: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" move %1, $0 \n" \
	" move %D1, $0 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 1b + 4, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" " __UA_ADDR " 2b + 4, 4b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=&r" (__gu_tmp) \
	: "0" (0), "r" (addr), "i" (-EFAULT)); \
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Yuck.  We need two variants, one for 64 bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_common(ptr, size) \
do { \
	switch (size) { \
	case 1: __put_user_unaligned_asm("sb", ptr); break; \
	case 2: __put_user_unaligned_asm("ush", ptr); break; \
	case 4: __put_user_unaligned_asm("usw", ptr); break; \
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
	default: __put_user_unaligned_unknown(); break; \
	} \
} while (0)

#define __put_user_unaligned_nocheck(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __pu_val; \
	int __pu_err = 0; \
	\
	__pu_val = (x); \
	__put_user_unaligned_common(ptr, size); \
	__pu_err; \
})

#define __put_user_unaligned_check(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	int __pu_err = -EFAULT; \
	\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
		__put_user_unaligned_common(__pu_addr, size); \
	\
	__pu_err; \
})

#define __put_user_unaligned_asm(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1: " insn " %z2, %3 # __put_user_unaligned_asm \n" \
	"2: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 3b \n" \
	" .previous \n" \
	: "=r" (__pu_err) \
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
	  "i" (-EFAULT)); \
}

#define __put_user_unaligned_asm_ll32(ptr) \
{ \
	__asm__ __volatile__( \
	"1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
	"2: sw %D2, 4(%3) \n" \
	"3: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 1b + 4, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" " __UA_ADDR " 2b + 4, 4b \n" \
	" .previous" \
	: "=r" (__pu_err) \
	: "0" (0), "r" (__pu_val), "r" (ptr), \
	  "i" (-EFAULT)); \
}

extern void __put_user_unaligned_unknown(void);
#endif /* !CONFIG_EVA */

/*
 * We're generating jumps to subroutines which may be outside the range
 * of the jump instruction, so from modules we go through a register.
 */
#ifdef MODULE
#define __MODULE_JAL(destination) \
	".set\tnoat\n\t" \
	__UA_LA "\t$1, " #destination "\n\t" \
	"jalr\t$1\n\t" \
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination) \
	"jal\t" #destination "\n\t"
#endif

#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) && \
	defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#ifndef CONFIG_EVA
#define __invoke_copy_to_user(to, from, n) \
({ \
	register void __user *__cu_to_r __asm__("$4"); \
	register const void *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
	\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	__MODULE_JAL(__copy_user) \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

#define __invoke_copy_to_kernel(to, from, n) \
	__invoke_copy_to_user(to, from, n)

#endif /* !CONFIG_EVA */

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	might_fault(); \
	if (segment_eq(get_fs(), get_ds())) \
		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
						   __cu_len); \
	else \
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
						 __cu_len); \
	__cu_len; \
})

extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (segment_eq(get_fs(), get_ds())) \
		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
						   __cu_len); \
	else \
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
						 __cu_len); \
	__cu_len; \
})

#define __copy_from_user_inatomic(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (segment_eq(get_fs(), get_ds())) \
		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
							      __cu_from, \
							      __cu_len); \
	else \
		__cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
							    __cu_from, \
							    __cu_len); \
	__cu_len; \
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (segment_eq(get_fs(), get_ds())) { \
		__cu_len = __invoke_copy_to_kernel(__cu_to, \
						   __cu_from, \
						   __cu_len); \
	} else { \
		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
			might_fault(); \
			__cu_len = __invoke_copy_to_user(__cu_to, \
							 __cu_from, \
							 __cu_len); \
		} \
	} \
	__cu_len; \
})

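/*
 * Illustrative sketch only (hypothetical names, including struct
 * example_info): returning a structure to user space; the macro checks
 * the destination itself, so no separate access_ok() is needed.
 *
 *	static int example_get_info(struct example_info __user *uinfo)
 *	{
 *		struct example_info info = { .version = 1 };
 *
 *		if (copy_to_user(uinfo, &info, sizeof(info)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
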
#ifndef CONFIG_EVA

#define __invoke_copy_from_user(to, from, n) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
	\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

#define __invoke_copy_from_kernel(to, from, n) \
	__invoke_copy_from_user(to, from, n)

/* For userland <-> userland operations */
#define ___invoke_copy_in_user(to, from, n) \
	__invoke_copy_from_user(to, from, n)

/* For kernel <-> kernel operations */
#define ___invoke_copy_in_kernel(to, from, n) \
	__invoke_copy_from_user(to, from, n)

#define __invoke_copy_from_user_inatomic(to, from, n) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
	\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user_inatomic) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

#define __invoke_copy_from_kernel_inatomic(to, from, n) \
	__invoke_copy_from_user_inatomic(to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
				       size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from,
				   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
				 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
	\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(func_ptr) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \
({ \
	register void __user *__cu_to_r __asm__("$4"); \
	register const void *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
	\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	__MODULE_JAL(func_ptr) \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

/*
 * Source or destination address is in userland. We need to go through
 * the TLB.
 */
#define __invoke_copy_from_user(to, from, n) \
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)

#define __invoke_copy_from_user_inatomic(to, from, n) \
	__invoke_copy_from_user_eva_generic(to, from, n, \
					    __copy_user_inatomic_eva)

#define __invoke_copy_to_user(to, from, n) \
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)

#define ___invoke_copy_in_user(to, from, n) \
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)

/*
 * Source or destination address is in the kernel. We are not going
 * through the TLB.
 */
#define __invoke_copy_from_kernel(to, from, n) \
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#define __invoke_copy_from_kernel_inatomic(to, from, n) \
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)

#define __invoke_copy_to_kernel(to, from, n) \
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)

#define ___invoke_copy_in_kernel(to, from, n) \
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#endif /* CONFIG_EVA */

/*
 * __copy_from_user: - Copy a block of data from user space, with less
 *		       checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	might_fault(); \
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
					   __cu_len); \
	__cu_len; \
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (segment_eq(get_fs(), get_ds())) { \
		__cu_len = __invoke_copy_from_kernel(__cu_to, \
						     __cu_from, \
						     __cu_len); \
	} else { \
		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
			might_fault(); \
			__cu_len = __invoke_copy_from_user(__cu_to, \
							   __cu_from, \
							   __cu_len); \
		} \
	} \
	__cu_len; \
})

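/*
 * Illustrative sketch only (hypothetical names): the classic write()
 * pattern, bouncing user data through a kernel buffer.
 *
 *	static ssize_t example_write(const char __user *ubuf, size_t len)
 *	{
 *		char kbuf[128];
 *
 *		if (len > sizeof(kbuf))
 *			len = sizeof(kbuf);
 *		if (copy_from_user(kbuf, ubuf, len))
 *			return -EFAULT;
 *		return len;
 *	}
 */
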
#define __copy_in_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (segment_eq(get_fs(), get_ds())) { \
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
						    __cu_len); \
	} else { \
		might_fault(); \
		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
						  __cu_len); \
	} \
	__cu_len; \
})

#define copy_in_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (segment_eq(get_fs(), get_ds())) { \
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
						    __cu_len); \
	} else { \
		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
			might_fault(); \
			__cu_len = ___invoke_copy_in_user(__cu_to, \
							  __cu_from, \
							  __cu_len); \
		} \
	} \
	__cu_len; \
})

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @addr: Destination address, in user space.
 * @size: Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}

/*
 * clear_user: - Zero a block of memory in user space.
 * @addr: Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
#define clear_user(addr, n) \
({ \
	void __user * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && access_ok(VERIFY_WRITE, \
				   __cl_addr, __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; \
})

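/*
 * Illustrative sketch only (hypothetical names): zeroing the tail of a
 * user buffer that was only partially filled.
 *
 *	static int example_pad(void __user *ubuf, unsigned long filled,
 *			       unsigned long total)
 *	{
 *		if (filled < total &&
 *		    clear_user(ubuf + filled, total - filled))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
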
/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace,
 *			  with less checking.
 * @__to:   Destination address, in kernel space.  This buffer must be at
 *	    least @__len bytes long.
 * @__from: Source address, in user space.
 * @__len:  Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @__len is smaller than the length of the string, copies @__len bytes
 * and returns @__len.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @__to:   Destination address, in kernel space.  This buffer must be at
 *	    least @__len bytes long.
 * @__from: Source address, in user space.
 * @__len:  Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @__len is smaller than the length of the string, copies @__len bytes
 * and returns @__len.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}

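/*
 * Illustrative sketch only (hypothetical names): copying a user-supplied
 * name and rejecting strings that do not fit in the buffer.
 *
 *	static int example_set_name(char *name, const char __user *uname,
 *				    size_t size)
 *	{
 *		long len = strncpy_from_user(name, uname, size);
 *
 *		if (len < 0)
 *			return len;
 *		if (len == size)
 *			return -ENAMETOOLONG;
 *		return 0;
 *	}
 */
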
/*
 * strlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	}

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}

/*
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: Maximum number of bytes to scan.
 *
 * Context: User context only.  This function may sleep.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	if (segment_eq(get_fs(), get_ds())) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}

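/*
 * Illustrative sketch only (hypothetical names): bounding a user string
 * before copying it; strnlen_user() counts the terminating NUL, and a
 * result of 0 or a value greater than the limit signals a fault or an
 * over-long string.
 *
 *	static long example_user_strlen(const char __user *ustr, long limit)
 *	{
 *		long len = strnlen_user(ustr, limit);
 *
 *		if (len == 0)
 *			return -EFAULT;
 *		if (len > limit)
 *			return -E2BIG;
 *		return len - 1;
 *	}
 */
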
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which execution should continue.  No registers are
 * modified, so it is entirely up to the fixup code to do what it likes
 * in this situation.
 */
struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);

#endif /* _ASM_UACCESS_H */