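/*
 * User-space memory access helpers for RISC-V: access_ok(), get_user(),
 * put_user(), the raw copy routines and a user-space compare-and-exchange.
 */
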
#ifndef _ASM_RISCV_UACCESS_H
#define _ASM_RISCV_UACCESS_H

#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/byteorder.h>
#include <asm/asm.h>
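
/*
 * Set/clear the SUM (permit Supervisor access to User Memory) bit in the
 * sstatus CSR around kernel accesses through user-space pointers.
 */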
#define __enable_user_access() \
	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
#define __disable_user_access() \
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
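
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 */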
#define KERNEL_DS	(~0UL)
#define USER_DS		(TASK_SIZE)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}

#define segment_eq(a, b) ((a) == (b))

#define user_addr_max()	(get_fs())

#define VERIFY_READ	0
#define VERIFY_WRITE	1
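
/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE (unused on RISC-V)
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Returns nonzero if the memory block may be valid, zero if it is
 * definitely invalid.  This only checks the range against the current
 * addr_limit; a later access may still fault.
 */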
#define access_ok(type, addr, size) ({					\
	__chk_user_ptr(addr);						\
	likely(__access_ok((unsigned long __force)(addr), (size)));	\
})
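
/*
 * Ensure that the range [addr, addr + size) lies within the process's
 * address space; written so the addition cannot overflow.
 */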
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	const mm_segment_t fs = get_fs();

	return (size <= fs) && (addr <= (fs - size));
}
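
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address of the out-of-line fixup code to continue at if it does.
 */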
struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *state);

#if defined(__LITTLE_ENDIAN)
#define __MSW	1
#define __LSW	0
#elif defined(__BIG_ENDIAN)
#define __MSW	0
#define __LSW	1
#else
#error "Unknown endianness"
#endif
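
/*
 * Load a value of the given size from user space.  On a fault the
 * out-of-line fixup sets err to -EFAULT, zeroes the destination and
 * resumes after the faulting load.
 */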
#define __get_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(x) __x;					\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		" " insn " %1, %3\n"				\
		"2:\n"						\
		" .section .fixup,\"ax\"\n"			\
		" .balign 4\n"					\
		"3:\n"						\
		" li %0, %4\n"					\
		" li %1, 0\n"					\
		" jump 2b, %2\n"				\
		" .previous\n"					\
		" .section __ex_table,\"a\"\n"			\
		" .balign " RISCV_SZPTR "\n"			\
		" " RISCV_PTR " 1b, 3b\n"			\
		" .previous"					\
		: "+r" (err), "=&r" (__x), "=r" (__tmp)		\
		: "m" (*(ptr)), "i" (-EFAULT));			\
	__disable_user_access();				\
	(x) = __x;						\
} while (0)

#ifdef CONFIG_64BIT
#define __get_user_8(x, ptr, err) \
	__get_user_asm("ld", x, ptr, err)
#else
#define __get_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	uintptr_t __tmp;					\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		" lw %1, %4\n"					\
		"2:\n"						\
		" lw %2, %5\n"					\
		"3:\n"						\
		" .section .fixup,\"ax\"\n"			\
		" .balign 4\n"					\
		"4:\n"						\
		" li %0, %6\n"					\
		" li %1, 0\n"					\
		" li %2, 0\n"					\
		" jump 3b, %3\n"				\
		" .previous\n"					\
		" .section __ex_table,\"a\"\n"			\
		" .balign " RISCV_SZPTR "\n"			\
		" " RISCV_PTR " 1b, 4b\n"			\
		" " RISCV_PTR " 2b, 4b\n"			\
		" .previous"					\
		: "+r" (err), "=&r" (__lo), "=r" (__hi),	\
			"=r" (__tmp)				\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]),	\
			"i" (-EFAULT));				\
	__disable_user_access();				\
	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#endif
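
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * Caller must check the pointer with access_ok() before calling this
 * macro.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */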
#define __get_user(x, ptr)						\
({									\
	register long __gu_err = 0;					\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
	__chk_user_ptr(__gu_ptr);					\
	switch (sizeof(*__gu_ptr)) {					\
	case 1:								\
		__get_user_asm("lb", (x), __gu_ptr, __gu_err);		\
		break;							\
	case 2:								\
		__get_user_asm("lh", (x), __gu_ptr, __gu_err);		\
		break;							\
	case 4:								\
		__get_user_asm("lw", (x), __gu_ptr, __gu_err);		\
		break;							\
	case 8:								\
		__get_user_8((x), __gu_ptr, __gu_err);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__gu_err;							\
})
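
/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space after range checking the pointer with access_ok().  A caller
 * might use it like this (uaddr is an illustrative name only):
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)uaddr))
 *		return -EFAULT;
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */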
#define get_user(x, ptr)						\
({									\
	const __typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();							\
	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
		__get_user((x), __p) :					\
		((x) = 0, -EFAULT);					\
})
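
/*
 * Store a value of the given size to user space.  On a fault the
 * out-of-line fixup sets err to -EFAULT and resumes after the store.
 */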
#define __put_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(*(ptr)) __x = x;				\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		" " insn " %z3, %2\n"				\
		"2:\n"						\
		" .section .fixup,\"ax\"\n"			\
		" .balign 4\n"					\
		"3:\n"						\
		" li %0, %4\n"					\
		" jump 2b, %1\n"				\
		" .previous\n"					\
		" .section __ex_table,\"a\"\n"			\
		" .balign " RISCV_SZPTR "\n"			\
		" " RISCV_PTR " 1b, 3b\n"			\
		" .previous"					\
		: "+r" (err), "=r" (__tmp), "=m" (*(ptr))	\
		: "rJ" (__x), "i" (-EFAULT));			\
	__disable_user_access();				\
} while (0)

#ifdef CONFIG_64BIT
#define __put_user_8(x, ptr, err) \
	__put_user_asm("sd", x, ptr, err)
#else
#define __put_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u64 __x = (__typeof__((x)-(x)))(x);			\
	uintptr_t __tmp;					\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		" sw %z4, %2\n"					\
		"2:\n"						\
		" sw %z5, %3\n"					\
		"3:\n"						\
		" .section .fixup,\"ax\"\n"			\
		" .balign 4\n"					\
		"4:\n"						\
		" li %0, %6\n"					\
		" jump 3b, %1\n"				\
		" .previous\n"					\
		" .section __ex_table,\"a\"\n"			\
		" .balign " RISCV_SZPTR "\n"			\
		" " RISCV_PTR " 1b, 4b\n"			\
		" " RISCV_PTR " 2b, 4b\n"			\
		" .previous"					\
		: "+r" (err), "=r" (__tmp),			\
			"=m" (__ptr[__LSW]),			\
			"=m" (__ptr[__MSW])			\
		: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT));	\
	__disable_user_access();				\
} while (0)
#endif
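
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * Caller must check the pointer with access_ok() before calling this
 * macro.
 *
 * Returns zero on success, or -EFAULT on error.
 */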
#define __put_user(x, ptr)						\
({									\
	register long __pu_err = 0;					\
	__typeof__(*(ptr)) __user *__gu_ptr = (ptr);			\
	__chk_user_ptr(__gu_ptr);					\
	switch (sizeof(*__gu_ptr)) {					\
	case 1:								\
		__put_user_asm("sb", (x), __gu_ptr, __pu_err);		\
		break;							\
	case 2:								\
		__put_user_asm("sh", (x), __gu_ptr, __pu_err);		\
		break;							\
	case 4:								\
		__put_user_asm("sw", (x), __gu_ptr, __pu_err);		\
		break;							\
	case 8:								\
		__put_user_8((x), __gu_ptr, __pu_err);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__pu_err;							\
})
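
/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space after range checking the pointer with access_ok().
 *
 * Returns zero on success, or -EFAULT on error.
 */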
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
		__put_user((x), __p) :					\
		-EFAULT;						\
})
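
/*
 * Bulk copy helpers.  These return the number of bytes that could not be
 * copied (zero on complete success).  The raw_* variants do no access_ok()
 * checking themselves; the generic wrappers in <linux/uaccess.h> add it.
 * A typical checked copy might look like (kbuf/ubuf/len are illustrative):
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */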
extern unsigned long __must_check __asm_copy_to_user(void __user *to,
	const void *from, unsigned long n);
extern unsigned long __must_check __asm_copy_from_user(void *to,
	const void __user *from, unsigned long n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __asm_copy_from_user(to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __asm_copy_to_user(to, from, n);
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern long __must_check strlen_user(const char __user *str);
extern long __must_check strnlen_user(const char __user *str, long n);

extern
unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline
unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return access_ok(VERIFY_WRITE, to, n) ?
		__clear_user(to, n) : n;
}
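
/*
 * Atomic compare-and-exchange of a naturally aligned 32- or 64-bit word in
 * user memory, built from LR/SC with an exception-table fixup: a faulting
 * user address sets err to -EFAULT rather than faulting fatally in the
 * kernel.  The lrb and scb arguments are pasted onto the LR/SC mnemonics
 * to select acquire/release variants.
 */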
#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb)	\
({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(*(ptr)) __old = (old);			\
	__typeof__(*(ptr)) __new = (new);			\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(err) __err = 0;				\
	register unsigned int __rc;				\
	__enable_user_access();					\
	switch (size) {						\
	case 4:							\
		__asm__ __volatile__ (				\
		"0:\n"						\
		" lr.w" #scb " %[ret], %[ptr]\n"		\
		" bne %[ret], %z[old], 1f\n"			\
		" sc.w" #lrb " %[rc], %z[new], %[ptr]\n"	\
		" bnez %[rc], 0b\n"				\
		"1:\n"						\
		".section .fixup,\"ax\"\n"			\
		".balign 4\n"					\
		"2:\n"						\
		" li %[err], %[efault]\n"			\
		" jump 1b, %[rc]\n"				\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		".balign " RISCV_SZPTR "\n"			\
		" " RISCV_PTR " 1b, 2b\n"			\
		".previous\n"					\
		: [ret] "=&r" (__ret),				\
		  [rc] "=&r" (__rc),				\
		  [ptr] "+A" (*__ptr),				\
		  [err] "=&r" (__err)				\
		: [old] "rJ" (__old),				\
		  [new] "rJ" (__new),				\
		  [efault] "i" (-EFAULT));			\
		break;						\
	case 8:							\
		__asm__ __volatile__ (				\
		"0:\n"						\
		" lr.d" #scb " %[ret], %[ptr]\n"		\
		" bne %[ret], %z[old], 1f\n"			\
		" sc.d" #lrb " %[rc], %z[new], %[ptr]\n"	\
		" bnez %[rc], 0b\n"				\
		"1:\n"						\
		".section .fixup,\"ax\"\n"			\
		".balign 4\n"					\
		"2:\n"						\
		" li %[err], %[efault]\n"			\
		" jump 1b, %[rc]\n"				\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		".balign " RISCV_SZPTR "\n"			\
		" " RISCV_PTR " 1b, 2b\n"			\
		".previous\n"					\
		: [ret] "=&r" (__ret),				\
		  [rc] "=&r" (__rc),				\
		  [ptr] "+A" (*__ptr),				\
		  [err] "=&r" (__err)				\
		: [old] "rJ" (__old),				\
		  [new] "rJ" (__new),				\
		  [efault] "i" (-EFAULT));			\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
	__disable_user_access();				\
	(err) = __err;						\
	__ret;							\
})

#endif /* _ASM_RISCV_UACCESS_H */