#ifndef _ASM_RISCV_UACCESS_H
#define _ASM_RISCV_UACCESS_H

#include <asm/pgtable.h>

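/*
 * User space memory access functions.
 */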
#ifdef CONFIG_MMU
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/byteorder.h>
#include <asm/extable.h>
#include <asm/asm.h>

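/*
 * User accesses from supervisor mode are gated by the SUM (permit
 * Supervisor User Memory access) bit in the sstatus CSR; these helpers
 * set and clear it around each access.
 */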
#define __enable_user_access() \
	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
#define __disable_user_access() \
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")

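/*
 * access_ok() - check that a user-space pointer range is plausibly valid.
 * @addr: start of the block to check
 * @size: size of the block in bytes
 *
 * Returns true if the range lies within the user address space.  A
 * subsequent user access may still fault and report -EFAULT.
 */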
#define access_ok(addr, size) ({ \
	__chk_user_ptr(addr); \
	likely(__access_ok((unsigned long __force)(addr), (size))); \
})

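/*
 * Ensure that the range [addr, addr + size) fits below TASK_SIZE without
 * wrapping around.
 */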
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return size <= TASK_SIZE && addr <= TASK_SIZE - size;
}

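/*
 * The exception table consists of pairs of addresses: the address of an
 * instruction that is allowed to fault and the address of its fixup code.
 * The fixup stubs live in the out-of-line .fixup section, so the fast path
 * never has to branch around them.
 */

/* Indices of the low and high 32-bit words of a 64-bit value. */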
#define __LSW	0
#define __MSW	1

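/*
 * The "__" prefixed accessors below do not call access_ok(); the caller is
 * responsible for validating the pointer first.  __get_user_asm() loads a
 * single value and, on a fault, sets @err to -EFAULT and zeroes @x.
 */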
#define __get_user_asm(insn, x, ptr, err) \
do { \
	uintptr_t __tmp; \
	__typeof__(x) __x; \
	__asm__ __volatile__ ( \
		"1:\n" \
		" " insn " %1, %3\n" \
		"2:\n" \
		" .section .fixup,\"ax\"\n" \
		" .balign 4\n" \
		"3:\n" \
		" li %0, %4\n" \
		" li %1, 0\n" \
		" jump 2b, %2\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .balign " RISCV_SZPTR "\n" \
		" " RISCV_PTR " 1b, 3b\n" \
		" .previous" \
		: "+r" (err), "=&r" (__x), "=r" (__tmp) \
		: "m" (*(ptr)), "i" (-EFAULT)); \
	(x) = __x; \
} while (0)

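/*
 * On RV32, an 8-byte user load is split into two lw instructions; either
 * one may fault, so both get exception-table entries.
 */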
#ifdef CONFIG_64BIT
#define __get_user_8(x, ptr, err) \
	__get_user_asm("ld", x, ptr, err)
#else
#define __get_user_8(x, ptr, err) \
do { \
	u32 __user *__ptr = (u32 __user *)(ptr); \
	u32 __lo, __hi; \
	uintptr_t __tmp; \
	__asm__ __volatile__ ( \
		"1:\n" \
		" lw %1, %4\n" \
		"2:\n" \
		" lw %2, %5\n" \
		"3:\n" \
		" .section .fixup,\"ax\"\n" \
		" .balign 4\n" \
		"4:\n" \
		" li %0, %6\n" \
		" li %1, 0\n" \
		" li %2, 0\n" \
		" jump 3b, %3\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .balign " RISCV_SZPTR "\n" \
		" " RISCV_PTR " 1b, 4b\n" \
		" " RISCV_PTR " 2b, 4b\n" \
		" .previous" \
		: "+r" (err), "=&r" (__lo), "=r" (__hi), \
		  "=r" (__tmp) \
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]), \
		  "i" (-EFAULT)); \
	(x) = (__typeof__(x))((__typeof__((x)-(x)))( \
		(((u64)__hi << 32) | __lo))); \
} while (0)
#endif

#define __get_user_nocheck(x, __gu_ptr, __gu_err) \
do { \
	switch (sizeof(*__gu_ptr)) { \
	case 1: \
		__get_user_asm("lb", (x), __gu_ptr, __gu_err); \
		break; \
	case 2: \
		__get_user_asm("lh", (x), __gu_ptr, __gu_err); \
		break; \
	case 4: \
		__get_user_asm("lw", (x), __gu_ptr, __gu_err); \
		break; \
	case 8: \
		__get_user_8((x), __gu_ptr, __gu_err); \
		break; \
	default: \
		BUILD_BUG(); \
	} \
} while (0)

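/*
 * __get_user() - read a simple variable from user space without calling
 * access_ok().
 * @x:   variable to store the result
 * @ptr: source address in user space; must point to a scalar type
 *
 * Returns 0 on success or -EFAULT on a faulting access, in which case @x
 * is set to 0.  The caller must have validated the pointer beforehand.
 */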
#define __get_user(x, ptr) \
({ \
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
	long __gu_err = 0; \
	\
	__chk_user_ptr(__gu_ptr); \
	\
	__enable_user_access(); \
	__get_user_nocheck(x, __gu_ptr, __gu_err); \
	__disable_user_access(); \
	\
	__gu_err; \
})

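/*
 * get_user() - read a simple variable from user space.
 * @x:   variable to store the result
 * @ptr: source address in user space; must point to a scalar type
 *
 * Checks the pointer with access_ok() and may sleep.  Returns 0 on
 * success or -EFAULT on error, in which case @x is set to 0.
 *
 * Typical use (hypothetical ioctl handler):
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)arg))
 *		return -EFAULT;
 */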
#define get_user(x, ptr) \
({ \
	const __typeof__(*(ptr)) __user *__p = (ptr); \
	might_fault(); \
	access_ok(__p, sizeof(*__p)) ? \
		__get_user((x), __p) : \
		((x) = 0, -EFAULT); \
})

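/*
 * For stores, the "rJ" constraint accepts either a register or the
 * constant zero, and the %z operand modifier emits the zero register for
 * a constant-zero operand, saving a temporary.
 */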
#define __put_user_asm(insn, x, ptr, err) \
do { \
	uintptr_t __tmp; \
	__typeof__(*(ptr)) __x = x; \
	__asm__ __volatile__ ( \
		"1:\n" \
		" " insn " %z3, %2\n" \
		"2:\n" \
		" .section .fixup,\"ax\"\n" \
		" .balign 4\n" \
		"3:\n" \
		" li %0, %4\n" \
		" jump 2b, %1\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .balign " RISCV_SZPTR "\n" \
		" " RISCV_PTR " 1b, 3b\n" \
		" .previous" \
		: "+r" (err), "=r" (__tmp), "=m" (*(ptr)) \
		: "rJ" (__x), "i" (-EFAULT)); \
} while (0)

#ifdef CONFIG_64BIT
#define __put_user_8(x, ptr, err) \
	__put_user_asm("sd", x, ptr, err)
#else
#define __put_user_8(x, ptr, err) \
do { \
	u32 __user *__ptr = (u32 __user *)(ptr); \
	u64 __x = (__typeof__((x)-(x)))(x); \
	uintptr_t __tmp; \
	__asm__ __volatile__ ( \
		"1:\n" \
		" sw %z4, %2\n" \
		"2:\n" \
		" sw %z5, %3\n" \
		"3:\n" \
		" .section .fixup,\"ax\"\n" \
		" .balign 4\n" \
		"4:\n" \
		" li %0, %6\n" \
		" jump 3b, %1\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .balign " RISCV_SZPTR "\n" \
		" " RISCV_PTR " 1b, 4b\n" \
		" " RISCV_PTR " 2b, 4b\n" \
		" .previous" \
		: "+r" (err), "=r" (__tmp), \
		  "=m" (__ptr[__LSW]), \
		  "=m" (__ptr[__MSW]) \
		: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
} while (0)
#endif

#define __put_user_nocheck(x, __gu_ptr, __pu_err) \
do { \
	switch (sizeof(*__gu_ptr)) { \
	case 1: \
		__put_user_asm("sb", (x), __gu_ptr, __pu_err); \
		break; \
	case 2: \
		__put_user_asm("sh", (x), __gu_ptr, __pu_err); \
		break; \
	case 4: \
		__put_user_asm("sw", (x), __gu_ptr, __pu_err); \
		break; \
	case 8: \
		__put_user_8((x), __gu_ptr, __pu_err); \
		break; \
	default: \
		BUILD_BUG(); \
	} \
} while (0)

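/*
 * __put_user() - write a simple value to user space without calling
 * access_ok().
 * @x:   value to copy to user space
 * @ptr: destination address in user space; must point to a scalar type
 *
 * Returns 0 on success or -EFAULT on a faulting access.  The caller must
 * have validated the pointer beforehand.
 */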
#define __put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
	long __pu_err = 0; \
	\
	__chk_user_ptr(__gu_ptr); \
	\
	__enable_user_access(); \
	__put_user_nocheck(x, __gu_ptr, __pu_err); \
	__disable_user_access(); \
	\
	__pu_err; \
})

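/*
 * put_user() - write a simple value to user space.
 * @x:   value to copy to user space
 * @ptr: destination address in user space; must point to a scalar type
 *
 * Checks the pointer with access_ok() and may sleep.  Returns 0 on success
 * or -EFAULT on error.
 *
 * Typical use (hypothetical ioctl handler):
 *
 *	if (put_user(status, (u32 __user *)arg))
 *		return -EFAULT;
 */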
#define put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __user *__p = (ptr); \
	might_fault(); \
	access_ok(__p, sizeof(*__p)) ? \
		__put_user((x), __p) : \
		-EFAULT; \
})

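/*
 * Bulk copy routines implemented in assembly.  As with all raw_copy_*()
 * helpers, the return value is the number of bytes that could not be
 * copied (0 on complete success).
 */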
unsigned long __must_check __asm_copy_to_user(void __user *to,
	const void *from, unsigned long n);
unsigned long __must_check __asm_copy_from_user(void *to,
	const void __user *from, unsigned long n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __asm_copy_from_user(to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __asm_copy_to_user(to, from, n);
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern long __must_check strlen_user(const char __user *str);
extern long __must_check strnlen_user(const char __user *str, long n);

extern
unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline
unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return access_ok(to, n) ?
		__clear_user(to, n) : n;
}

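/*
 * __cmpxchg_user() - compare-and-exchange on a user-space word, with an
 * exception-table fixup.  A faulting access sets @err to -EFAULT; the
 * macro itself evaluates to the value previously found at @ptr.
 */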
#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \
({ \
	__typeof__(ptr) __ptr = (ptr); \
	__typeof__(*(ptr)) __old = (old); \
	__typeof__(*(ptr)) __new = (new); \
	__typeof__(*(ptr)) __ret; \
	__typeof__(err) __err = 0; \
	register unsigned int __rc; \
	__enable_user_access(); \
	switch (size) { \
	case 4: \
		__asm__ __volatile__ ( \
		"0:\n" \
		" lr.w" #scb " %[ret], %[ptr]\n" \
		" bne %[ret], %z[old], 1f\n" \
		" sc.w" #lrb " %[rc], %z[new], %[ptr]\n" \
		" bnez %[rc], 0b\n" \
		"1:\n" \
		".section .fixup,\"ax\"\n" \
		".balign 4\n" \
		"2:\n" \
		" li %[err], %[efault]\n" \
		" jump 1b, %[rc]\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		".balign " RISCV_SZPTR "\n" \
		" " RISCV_PTR " 1b, 2b\n" \
		".previous\n" \
			: [ret] "=&r" (__ret), \
			  [rc] "=&r" (__rc), \
			  [ptr] "+A" (*__ptr), \
			  [err] "=&r" (__err) \
			: [old] "rJ" (__old), \
			  [new] "rJ" (__new), \
			  [efault] "i" (-EFAULT)); \
		break; \
	case 8: \
		__asm__ __volatile__ ( \
		"0:\n" \
		" lr.d" #scb " %[ret], %[ptr]\n" \
		" bne %[ret], %z[old], 1f\n" \
		" sc.d" #lrb " %[rc], %z[new], %[ptr]\n" \
		" bnez %[rc], 0b\n" \
		"1:\n" \
		".section .fixup,\"ax\"\n" \
		".balign 4\n" \
		"2:\n" \
		" li %[err], %[efault]\n" \
		" jump 1b, %[rc]\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		".balign " RISCV_SZPTR "\n" \
		" " RISCV_PTR " 1b, 2b\n" \
		".previous\n" \
			: [ret] "=&r" (__ret), \
			  [rc] "=&r" (__rc), \
			  [ptr] "+A" (*__ptr), \
			  [err] "=&r" (__err) \
			: [old] "rJ" (__old), \
			  [new] "rJ" (__new), \
			  [efault] "i" (-EFAULT)); \
		break; \
	default: \
		BUILD_BUG(); \
	} \
	__disable_user_access(); \
	(err) = __err; \
	__ret; \
})

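/*
 * Unchecked accessors for kernel addresses, used to implement
 * {get,put}_kernel_nofault() and friends; they branch to @err_label on a
 * fault instead of returning an error code.
 */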
#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
	long __kr_err = 0; \
	\
	__get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
	if (unlikely(__kr_err)) \
		goto err_label; \
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
	long __kr_err = 0; \
	\
	__put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \
	if (unlikely(__kr_err)) \
		goto err_label; \
} while (0)

#else /* !CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_UACCESS_H */