#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT	0x40000000UL
#else
#define __UA_LIMIT	0x80000000UL
#endif

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */

/*
 * mm_segment_t values: the "seg" field is a mask of the address bits that
 * must not be set for an access to pass __access_ok().  KERNEL_DS lifts the
 * user-space limit (the mask only keeps kernel accesses below 2GB on KVM
 * guest kernels), while USER_DS restricts accesses to addresses below the
 * user-space limit.
 */
#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
#define USER_DS		((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

/*
 * eva_kernel_access() - determine whether a kernel memory access on an EVA
 * system
 *
 * Determines whether memory accesses should be performed to kernel memory
 * on a system using Extended Virtual Addressing (EVA).
 *
 * Return: true if a kernel memory access on an EVA system, else false.
 */
static inline bool eva_kernel_access(void)
{
	if (!IS_ENABLED(CONFIG_EVA))
		return false;

	return uaccess_kernel();
}

/*
 * __ua_size() avoids a runtime check for constant, positive sizes: when the
 * compiler already knows the size is a non-negative constant, it cannot have
 * any of the limit bits set, so the term folds away to zero.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid: the
 * block is accepted if @addr, @addr + @size and @size have none of the
 * bits of the current segment mask (get_fs().seg) set.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that this only checks that the block lies in the user address
 * range - after calling this, the access functions may still return
 * -EFAULT.
 */
static inline int __access_ok(const void __user *p, unsigned long size)
{
	unsigned long addr = (unsigned long)p;
	return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
}

#define access_ok(type, addr, size)					\
	likely(__access_ok((addr), (size)))
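
/*
 * Illustrative only (not part of the original header): a caller typically
 * checks the whole range once with access_ok() before using the unchecked
 * accessors.  "ubuf" and "len" here are hypothetical:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */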

/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
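
/*
 * Illustrative only (hypothetical "uptr" user pointer):
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */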

/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this macro.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this macro.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
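
/*
 * Illustrative only: the unchecked variants let a single access_ok() check
 * cover a whole loop of accesses.  "uarray", "n" and "tmp" are hypothetical:
 *
 *	if (!access_ok(VERIFY_READ, uarray, n * sizeof(*uarray)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__get_user(tmp, uarray + i))
 *			return -EFAULT;
 */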

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Without EVA, kernel addresses can be reached with the normal user load
 * macros; with EVA enabled, dedicated kernel-mode load instructions are
 * needed instead (see below).
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else

/*
 * Kernel specific functions for EVA. We need to use normal load instructions
 * to read data from the kernel segments when operating in EVA mode. We use
 * these macros to avoid redefining the uaccess functions and how they are
 * used.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd			_loadw
#else
#define _loadd(reg, addr)	"ld " reg ", " addr
#endif
#define _loadw(reg, addr)	"lw " reg ", " addr
#define _loadh(reg, addr)	"lh " reg ", " addr
#define _loadb(reg, addr)	"lb " reg ", " addr

#define __get_kernel_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, _loadb, ptr); break;		\
	case 2: __get_data_asm(val, _loadh, ptr); break;		\
	case 4: __get_data_asm(val, _loadw, ptr); break;		\
	case 8: __GET_DW(val, _loadd, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, user_lb, ptr); break;		\
	case 2: __get_data_asm(val, user_lh, ptr); break;		\
	case 4: __get_data_asm(val, user_lw, ptr); break;		\
	case 8: __GET_DW(val, user_ld, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
									\
	if (eva_kernel_access()) {					\
		__get_kernel_common((x), size, ptr);			\
	} else {							\
		__chk_user_ptr(ptr);					\
		__get_user_common((x), size, ptr);			\
	}								\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) {		\
		if (eva_kernel_access())				\
			__get_kernel_common((x), size, __gu_ptr);	\
		else							\
			__get_user_common((x), size, __gu_ptr);		\
	} else								\
		(x) = 0;						\
									\
	__gu_err;							\
})

#define __get_data_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	"insn("%1", "%3")"			\n"		\
	"2:						\n"		\
	"	.insn					\n"		\
	"	.section .fixup,\"ax\"			\n"		\
	"3:	li	%0, %4				\n"		\
	"	move	%1, $0				\n"		\
	"	j	2b				\n"		\
	"	.previous				\n"		\
	"	.section __ex_table,\"a\"		\n"		\
	"	"__UA_ADDR "\t1b, 3b			\n"		\
	"	.previous				\n"		\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a 64 bit wide long long using two 32 bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)				\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn("%1", "(%3)")"			\n"		\
	"2:	" insn("%D1", "4(%3)")"			\n"		\
	"3:						\n"		\
	"	.insn					\n"		\
	"	.section .fixup,\"ax\"			\n"		\
	"4:	li	%0, %4				\n"		\
	"	move	%1, $0				\n"		\
	"	move	%D1, $0				\n"		\
	"	j	3b				\n"		\
	"	.previous				\n"		\
	"	.section __ex_table,\"a\"		\n"		\
	"	" __UA_ADDR "	1b, 4b			\n"		\
	"	" __UA_ADDR "	2b, 4b			\n"		\
	"	.previous				\n"		\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}

#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else

/*
 * Kernel specific functions for EVA. We need to use normal store
 * instructions to write data to the kernel segments when operating in EVA
 * mode. We use these macros to avoid redefining the uaccess functions and
 * how they are used.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored			_storew
#else
#define _stored(reg, addr)	"sd " reg ", " addr
#endif

#define _storew(reg, addr)	"sw " reg ", " addr
#define _storeh(reg, addr)	"sh " reg ", " addr
#define _storeb(reg, addr)	"sb " reg ", " addr

#define __put_kernel_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(_storeb, ptr); break;			\
	case 2: __put_data_asm(_storeh, ptr); break;			\
	case 4: __put_data_asm(_storew, ptr); break;			\
	case 8: __PUT_DW(_stored, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)
#endif

/*
 * We need two variants: one for 64-bit kernels, where a 64 bit value can be
 * stored with a single instruction, and one for 32-bit kernels, where it
 * has to be stored as two 32 bit halves.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(user_sb, ptr); break;			\
	case 2: __put_data_asm(user_sh, ptr); break;			\
	case 4: __put_data_asm(user_sw, ptr); break;			\
	case 8: __PUT_DW(user_sd, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	if (eva_kernel_access()) {					\
		__put_kernel_common(ptr, size);				\
	} else {							\
		__chk_user_ptr(ptr);					\
		__put_user_common(ptr, size);				\
	}								\
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {	\
		if (eva_kernel_access())				\
			__put_kernel_common(__pu_addr, size);		\
		else							\
			__put_user_common(__pu_addr, size);		\
	}								\
									\
	__pu_err;							\
})

#define __put_data_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
	"2:						\n"		\
	"	.insn					\n"		\
	"	.section .fixup,\"ax\"			\n"		\
	"3:	li	%0, %4				\n"		\
	"	j	2b				\n"		\
	"	.previous				\n"		\
	"	.section __ex_table,\"a\"		\n"		\
	"	" __UA_ADDR "	1b, 3b			\n"		\
	"	.previous				\n"		\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_data_asm_ll32(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
	"2:	"insn("%D2", "4(%3)")"			\n"		\
	"3:						\n"		\
	"	.insn					\n"		\
	"	.section .fixup,\"ax\"			\n"		\
	"4:	li	%0, %4				\n"		\
	"	j	3b				\n"		\
	"	.previous				\n"		\
	"	.section __ex_table,\"a\"		\n"		\
	"	" __UA_ADDR "	1b, 4b			\n"		\
	"	" __UA_ADDR "	2b, 4b			\n"		\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unknown(void);

/*
 * We're generating jumps to subroutines which can be outside the range of a
 * plain "jal", so from modules we load the target address into $1 and use
 * "jalr" instead.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
					      defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from(func, to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(func)						\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_to(func, to, from, n)				\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(func)						\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to(__copy_user, to, from, n)

#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#ifndef CONFIG_EVA
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to(__copy_user, to, from, n)

#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_from_user_eva(void *__to, const void *__from,
				   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
				 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

/*
 * Source or destination address is in userland. We need to use the
 * EVA instructions to map userland addresses.
 */
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from(__copy_from_user_eva, to, from, n)

#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to(__copy_to_user_eva, to, from, n)

#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from(__copy_in_user_eva, to, from, n)

#endif /* CONFIG_EVA */

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (eva_kernel_access())
		return __invoke_copy_to_kernel(to, from, n);
	else
		return __invoke_copy_to_user(to, from, n);
}

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (eva_kernel_access())
		return __invoke_copy_from_kernel(to, from, n);
	else
		return __invoke_copy_from_user(to, from, n);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (eva_kernel_access())
		return ___invoke_copy_in_kernel(to, from, n);
	else
		return ___invoke_copy_in_user(to, from, n);
}
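
/*
 * Note: raw_copy_{to,from,in}_user() are the architecture back ends used by
 * the generic copy_to_user()/copy_from_user() helpers in <linux/uaccess.h>;
 * like those helpers they return the number of bytes that could NOT be
 * copied.  Illustrative caller (hypothetical "kbuf"/"ubuf"/"len"):
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */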

extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @addr: Destination address, in user space.
 * @size: Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero_kernel)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	}

	return res;
}

#define clear_user(addr, n)						\
({									\
	void __user *__cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
				   __cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
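
/*
 * Illustrative use of clear_user() (hypothetical "ubuf" and "len"): zero a
 * user buffer and report a fault if any byte could not be cleared.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */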

extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from,
				      long __len);
extern long __strncpy_from_user_asm(char *__to, const char __user *__from,
				    long __len);

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @__to:   Destination address, in kernel space.  This buffer must be at
 *          least @__len bytes long.
 * @__from: Source address, in user space.
 * @__len:  Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @__len is smaller than the length of the string, copies @__len bytes
 * and returns @__len.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}
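
/*
 * Illustrative use (hypothetical "uname" user pointer):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;		(fault: -EFAULT)
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	(no NUL within the buffer)
 */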

extern long __strnlen_kernel_asm(const char __user *s, long n);
extern long __strnlen_user_asm(const char __user *s, long n);

/*
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum number of bytes to look at.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}
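
/*
 * Illustrative use (hypothetical "ustr" pointer and limit MAX_LEN):
 *
 *	long len = strnlen_user(ustr, MAX_LEN);
 *
 *	if (!len)
 *		return -EFAULT;
 *	if (len > MAX_LEN)
 *		return -EINVAL;
 */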

#endif /* _ASM_UACCESS_H */