/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/uaccess.h
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertently
 * perform such accesses (e.g. via list poison values) which could then
 * be exploited for privilege escalation.
 */
static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
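
/*
 * Hedged usage sketch (not part of this header's API, just the pattern
 * the accessors below follow): every user access is bracketed by this
 * pair, so that with CONFIG_CPU_SW_DOMAIN_PAN userspace is only
 * accessible for the duration of the access itself:
 *
 *	unsigned int ua_flags = uaccess_save_and_enable();
 *	...perform the user load/store here...
 *	uaccess_restore(ua_flags);
 */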

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

#ifdef CONFIG_MMU

/*
 * We use 33-bit arithmetic here: the adds/sbcscc pair computes the full
 * 33-bit sum of addr and size, so a wrapping sum cannot pass the check.
 * Success leaves zero in "flag"; failure leaves TASK_SIZE there.
 */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	__asm__(".syntax unified\n" \
	"adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
	: "=&r" (flag), "=&r" (roksum) \
	: "r" (addr), "Ir" (size), "0" (TASK_SIZE) \
	: "cc"); \
	flag; })
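
/*
 * Worked example (assuming the common 3G/1G split, i.e. TASK_SIZE is
 * 0xbf000000): for addr = 0xbefffff8 and size = 8, addr + size equals
 * TASK_SIZE, the movcc fires and "flag" becomes 0 (success).  For
 * size = 9 the 33-bit sum exceeds TASK_SIZE, so "flag" is left as
 * TASK_SIZE and access_ok() below evaluates to false.  If the addition
 * itself wraps, the carry stays set and the check fails as well.
 */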

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
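
/*
 * For example, __inttype(u16) is unsigned long, while __inttype(u64) is
 * unsigned long long; get_user() below uses this so a 64-bit result can
 * be staged in the r2/r3 register pair before being cast to the target
 * type.
 */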

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the user address space limit (TASK_SIZE).
 */
#define uaccess_mask_range_ptr(ptr, size)			\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	.syntax unified\n"
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subshs	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (TASK_SIZE)
	: "cc");

	csdb();
	return safe_ptr;
}
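
/*
 * Hedged usage sketch (the caller below is hypothetical): sanitise the
 * pointer once, then perform the unverified access through the result,
 * which is NULL whenever ptr + size is not wholly below TASK_SIZE:
 *
 *	static int example_copy_in(void *dst, const void __user *src,
 *				   size_t len)
 *	{
 *		const void __user *safe = uaccess_mask_range_ptr(src, len);
 *
 *		if (!safe)
 *			return -EFAULT;
 *		return raw_copy_from_user(dst, safe, len) ? -EFAULT : 0;
 *	}
 */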

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Usually, this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)			\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif

#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = TASK_SIZE - 1;			\
		register typeof(*(p)) __user *__p asm("r0") = (p);	\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		int __tmp_e;						\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		__tmp_e = __e;						\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__tmp_e;						\
	})

#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	 })
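
/*
 * Hedged usage sketch (the function is hypothetical): get_user() does
 * its own limit check via r1, returns 0 or -EFAULT, and the out-of-line
 * helpers zero the value register on fault, so callers only test the
 * result:
 *
 *	static int example_get_flags(const u32 __user *uptr, u32 *out)
 *	{
 *		u32 val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		*out = val;
 *		return 0;
 *	}
 */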

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = TASK_SIZE - 1;			\
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})

#else /* CONFIG_MMU */

#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(addr, size)	(__range_ok(addr, size) == 0)

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the
 * non-verifying accessors, because we need to add verification of the
 * address space there.  Rather than that, we just use the verifying
 * accessors for everything.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 */
#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err, TUSER());			\
	__gu_err;							\
})

#define __get_user_err(x, ptr, err, __t)				\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)
#endif

#define __get_user_asm(x, addr, err, instr)				\
	__asm__ __volatile__(						\
	"1:	" instr " %1, [%2], #0\n"				\
	"2:\n"								\
	"	.pushsection .text.fixup,\"ax\"\n"			\
	"	.align	2\n"						\
	"3:	mov	%0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.popsection\n"						\
	"	.pushsection __ex_table,\"a\"\n"			\
	"	.align	3\n"						\
	"	.long	1b, 3b\n"					\
	"	.popsection"						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT)					\
	: "cc")

#define __get_user_asm_byte(x, addr, err, __t)				\
	__get_user_asm(x, addr, err, "ldrb" __t)

#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err, __t)				\
	__get_user_asm(x, addr, err, "ldrh" __t)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err, __t)			\
({									\
	unsigned long __b1, __b2;					\
	__get_user_asm_byte(__b1, __gu_addr, err, __t);			\
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);		\
	(x) = __b1 | (__b2 << 8);					\
})
#else
#define __get_user_asm_half(x, __gu_addr, err, __t)			\
({									\
	unsigned long __b1, __b2;					\
	__get_user_asm_byte(__b1, __gu_addr, err, __t);			\
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);		\
	(x) = (__b1 << 8) | __b2;					\
})
#endif

#endif

#define __get_user_asm_word(x, addr, err, __t)				\
	__get_user_asm(x, addr, err, "ldr" __t)

#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)

#define put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_check);	\
	__pu_err;							\
})
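
/*
 * Hedged counterpart for stores (again, a hypothetical caller):
 * put_user() verifies the address itself and returns 0 or -EFAULT, so
 * no separate access_ok() is needed:
 *
 *	static int example_put_status(u32 __user *uptr, u32 status)
 *	{
 *		return put_user(status, uptr);
 *	}
 */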

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
	__pu_err;							\
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#endif /* !CONFIG_CPU_SPECTRE */

#define __put_user_asm(x, __pu_addr, err, instr)			\
	__asm__ __volatile__(						\
	"1:	" instr " %1, [%2], #0\n"				\
	"2:\n"								\
	"	.pushsection .text.fixup,\"ax\"\n"			\
	"	.align	2\n"						\
	"3:	mov	%0, %3\n"					\
	"	b	2b\n"						\
	"	.popsection\n"						\
	"	.pushsection __ex_table,\"a\"\n"			\
	"	.align	3\n"						\
	"	.long	1b, 3b\n"					\
	"	.popsection"						\
	: "+r" (err)							\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)			\
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err, __t)			\
	__put_user_asm(x, __pu_addr, err, "strb" __t)

#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err, __t)			\
	__put_user_asm(x, __pu_addr, err, "strh" __t)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err, __t)			\
({									\
	unsigned long __temp = (__force unsigned long)(x);		\
	__put_user_asm_byte(__temp, __pu_addr, err, __t);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);	\
})
#else
#define __put_user_asm_half(x, __pu_addr, err, __t)			\
({									\
	unsigned long __temp = (__force unsigned long)(x);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err, __t);		\
	__put_user_asm_byte(__temp, __pu_addr + 1, err, __t);		\
})
#endif

#endif

#define __put_user_asm_word(x, __pu_addr, err, __t)			\
	__put_user_asm(x, __pu_addr, err, "str" __t)

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err, __t)		\
	__asm__ __volatile__(					\
 ARM(	"1:	str" __t "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	str" __t "	" __reg_oper0 ", [%1]\n"	) \
 THUMB(	"1:	str" __t "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	str" __t "	" __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	const type *__pk_ptr = (src);					\
	unsigned long __src = (unsigned long)(__pk_ptr);		\
	type __val;							\
	int __err = 0;							\
	switch (sizeof(type)) {						\
	case 1:	__get_user_asm_byte(__val, __src, __err, ""); break;	\
	case 2: __get_user_asm_half(__val, __src, __err, ""); break;	\
	case 4: __get_user_asm_word(__val, __src, __err, ""); break;	\
	case 8: {							\
		u32 *__v32 = (u32 *)&__val;				\
		__get_user_asm_word(__v32[0], __src, __err, "");	\
		if (__err)						\
			break;						\
		__get_user_asm_word(__v32[1], __src + 4, __err, "");	\
		break;							\
	}								\
	default: __err = __get_user_bad(); break;			\
	}								\
	*(type *)(dst) = __val;						\
	if (__err)							\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	const type *__pk_ptr = (dst);					\
	unsigned long __dst = (unsigned long)__pk_ptr;			\
	int __err = 0;							\
	type __val = *(type *)(src);					\
	switch (sizeof(type)) {						\
	case 1: __put_user_asm_byte(__val, __dst, __err, ""); break;	\
	case 2:	__put_user_asm_half(__val, __dst, __err, ""); break;	\
	case 4:	__put_user_asm_word(__val, __dst, __err, ""); break;	\
	case 8:	__put_user_asm_dword(__val, __dst, __err, ""); break;	\
	default: __err = __put_user_bad(); break;			\
	}								\
	if (__err)							\
		goto err_label;						\
} while (0)
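
/*
 * Hedged sketch of how these hooks are driven: the generic
 * copy_{from,to}_kernel_nofault() paths expand them with a local error
 * label, roughly like this (the function below is hypothetical):
 *
 *	static long example_peek_u32(const u32 *kaddr, u32 *out)
 *	{
 *		__get_kernel_nofault(out, kaddr, u32, Efault);
 *		return 0;
 *	Efault:
 *		return -EFAULT;
 *	}
 */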

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}
536
537extern unsigned long __must_check
538arm_copy_to_user(void __user *to, const void *from, unsigned long n);
539extern unsigned long __must_check
540__copy_to_user_std(void __user *to, const void *from, unsigned long n);
541
542static inline unsigned long __must_check
543raw_copy_to_user(void __user *to, const void *from, unsigned long n)
544{
545#ifndef CONFIG_UACCESS_WITH_MEMCPY
546 unsigned int __ua_flags;
547 __ua_flags = uaccess_save_and_enable();
548 n = arm_copy_to_user(to, from, n);
549 uaccess_restore(__ua_flags);
550 return n;
551#else
552 return arm_copy_to_user(to, from, n);
553#endif
554}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();

	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else /* !CONFIG_MMU */

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}

#define __clear_user(addr, n)	(memset((void __force *)addr, 0, n), 0)

#endif /* CONFIG_MMU */
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __clear_user(to, n);
	return n;
}
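
/*
 * Hedged usage sketch (hypothetical caller): zero the uncopied tail of a
 * user buffer; a non-zero return from clear_user() is the number of
 * bytes that could not be cleared:
 *
 *	static int example_zero_tail(void __user *buf, size_t copied,
 *				     size_t total)
 *	{
 *		return clear_user(buf + copied, total - copied) ?
 *			-EFAULT : 0;
 *	}
 */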

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);
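
/*
 * Hedged usage sketch for strncpy_from_user(): the return value is the
 * string length excluding the NUL, a negative errno on fault, or "count"
 * when the source did not fit (in which case "dest" may lack a NUL):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */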

#endif /* _ASMARM_UACCESS_H */