1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#ifndef _CRIS_UACCESS_H
16#define _CRIS_UACCESS_H
17
18#ifndef __ASSEMBLY__
19#include <linux/sched.h>
20#include <linux/errno.h>
21#include <asm/processor.h>
22#include <asm/page.h>
23
/*
 * Dummy direction arguments: the CRIS access_ok() below ignores the
 * type, but callers still pass one of these for documentation.
 */
#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not: with get_fs() == KERNEL_DS checking is bypassed,
 * with get_fs() == USER_DS addresses are limited to TASK_SIZE.
 */

/* Wrap a raw limit value in the mm_segment_t struct type. */
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })

/*
 * KERNEL_DS: no limit (anything up to 0xFFFFFFFF passes __user_ok()).
 * USER_DS:   accesses must fall below TASK_SIZE.
 */
#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS MAKE_MM_SEG(TASK_SIZE)

#define get_ds() (KERNEL_DS)
/* The current address-space limit lives in thread_info. */
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

/* True when the limit is KERNEL_DS, i.e. no range checking wanted. */
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
/*
 * Range check against TASK_SIZE; checking (size) first guards the
 * subtraction in the second comparison against unsigned wrap-around.
 */
#define __user_ok(addr, size) \
	(((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
/* The type argument (VERIFY_READ/VERIFY_WRITE) is deliberately unused. */
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
57
58#include <arch/uaccess.h>
59
60
61
62
63
64
65
66
67
68
69
70
71
72
/*
 * One entry of the kernel exception table: 'insn' is the address of an
 * instruction that is allowed to fault, and 'fixup' is the address to
 * continue at when it does (resolved by the page-fault handler; the
 * fixup code is responsible for any register cleanup).
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
/*
 * get_user()/put_user(): transfer a simple scalar to/from user space,
 * validating the pointer with access_ok() first; they evaluate to 0 on
 * success or -EFAULT on failure.  The double-underscore variants skip
 * the access_ok() check and must only be used when the caller has
 * already validated the range itself.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
104
/* Deliberately has no definition: referencing it for an unsupported
 * size produces a link-time error. */
extern long __put_user_bad(void);

/*
 * Dispatch a user-space store on the (compile-time constant) size of
 * the object.  'retval' is set to 0 here and changed to -EFAULT by the
 * __put_user_asm*() fixup code if the store faults.  Sizes other than
 * 1/2/4/8 fail to link via __put_user_bad().
 */
#define __put_user_size(x, ptr, size, retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: \
		__put_user_asm(x, ptr, retval, "move.b"); \
		break; \
	case 2: \
		__put_user_asm(x, ptr, retval, "move.w"); \
		break; \
	case 4: \
		__put_user_asm(x, ptr, retval, "move.d"); \
		break; \
	case 8: \
		__put_user_asm_64(x, ptr, retval); \
		break; \
	default: \
		__put_user_bad(); \
	} \
} while (0)
127
/*
 * Dispatch a user-space load on the (compile-time constant) size of
 * the object, mirroring __put_user_size().  'retval' becomes 0 on
 * success or -EFAULT via the asm fixups; unsupported sizes fail to
 * link through __get_user_bad().
 */
#define __get_user_size(x, ptr, size, retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: \
		__get_user_asm(x, ptr, retval, "move.b"); \
		break; \
	case 2: \
		__get_user_asm(x, ptr, retval, "move.w"); \
		break; \
	case 4: \
		__get_user_asm(x, ptr, retval, "move.d"); \
		break; \
	case 8: \
		__get_user_asm_64(x, ptr, retval); \
		break; \
	default: \
		(x) = __get_user_bad(); \
	} \
} while (0)
148
/*
 * Unchecked put: stores through 'ptr' with no access_ok() validation.
 * Evaluates to 0 on success or -EFAULT (set by the asm fixup on a
 * faulting store).
 */
#define __put_user_nocheck(x, ptr, size) \
({ \
	long __pu_err; \
	__put_user_size((x), (ptr), (size), __pu_err); \
	__pu_err; \
})
155
/*
 * Checked put: validates the destination range first and evaluates to
 * -EFAULT without touching user memory when the range is bad.
 */
#define __put_user_check(x, ptr, size) \
({ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) *__pu_addr = (ptr); \
	if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
		__put_user_size((x), __pu_addr, (size), __pu_err); \
	__pu_err; \
})
164
/*
 * Tell gcc the access goes through memory rather than a plain
 * variable; the large struct type keeps it from making assumptions
 * about the size of the access.  (Conventional uaccess trick --
 * __m() is presumably used by the asm helpers in <arch/uaccess.h>.)
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
167
168
169
/*
 * Unchecked get: loads through 'ptr' with no access_ok() validation.
 * The value is fetched into a long and cast back to the pointee type;
 * evaluates to 0 on success or -EFAULT.
 */
#define __get_user_nocheck(x, ptr, size) \
({ \
	long __gu_err, __gu_val; \
	__get_user_size(__gu_val, (ptr), (size), __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})
177
/*
 * Checked get: validates the source range first; on a bad range the
 * result is forced to 0 and the expression evaluates to -EFAULT.
 */
#define __get_user_check(x, ptr, size) \
({ \
	long __gu_err = -EFAULT, __gu_val = 0; \
	const __typeof__(*(ptr)) *__gu_addr = (ptr); \
	if (access_ok(VERIFY_READ, __gu_addr, size)) \
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

/* Deliberately has no definition: referencing it for an unsupported
 * size produces a link-time error. */
extern long __get_user_bad(void);
189
190
191
192
193extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
194extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
195extern unsigned long __do_clear_user(void __user *to, unsigned long n);
196
197static inline unsigned long
198__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
199{
200 if (access_ok(VERIFY_WRITE, to, n))
201 return __copy_user(to, from, n);
202 return n;
203}
204
205static inline unsigned long
206__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
207{
208 if (access_ok(VERIFY_READ, from, n))
209 return __copy_user_zeroing(to, from, n);
210 return n;
211}
212
213static inline unsigned long
214__generic_clear_user(void __user *to, unsigned long n)
215{
216 if (access_ok(VERIFY_WRITE, to, n))
217 return __do_clear_user(to, n);
218 return n;
219}
220
/*
 * Unchecked strncpy from user space; __do_strncpy_from_user() comes
 * from <arch/uaccess.h>.  The caller must have validated 'src'.
 */
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	return __do_strncpy_from_user(dst, src, count);
}
226
227static inline long
228strncpy_from_user(char *dst, const char __user *src, long count)
229{
230 long res = -EFAULT;
231
232 if (access_ok(VERIFY_READ, src, 1))
233 res = __do_strncpy_from_user(dst, src, count);
234 return res;
235}
236
237
238
239
240
241static inline unsigned long
242__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
243{
244 unsigned long ret = 0;
245
246 if (n == 0)
247 ;
248 else if (n == 1)
249 __asm_copy_from_user_1(to, from, ret);
250 else if (n == 2)
251 __asm_copy_from_user_2(to, from, ret);
252 else if (n == 3)
253 __asm_copy_from_user_3(to, from, ret);
254 else if (n == 4)
255 __asm_copy_from_user_4(to, from, ret);
256 else if (n == 5)
257 __asm_copy_from_user_5(to, from, ret);
258 else if (n == 6)
259 __asm_copy_from_user_6(to, from, ret);
260 else if (n == 7)
261 __asm_copy_from_user_7(to, from, ret);
262 else if (n == 8)
263 __asm_copy_from_user_8(to, from, ret);
264 else if (n == 9)
265 __asm_copy_from_user_9(to, from, ret);
266 else if (n == 10)
267 __asm_copy_from_user_10(to, from, ret);
268 else if (n == 11)
269 __asm_copy_from_user_11(to, from, ret);
270 else if (n == 12)
271 __asm_copy_from_user_12(to, from, ret);
272 else if (n == 13)
273 __asm_copy_from_user_13(to, from, ret);
274 else if (n == 14)
275 __asm_copy_from_user_14(to, from, ret);
276 else if (n == 15)
277 __asm_copy_from_user_15(to, from, ret);
278 else if (n == 16)
279 __asm_copy_from_user_16(to, from, ret);
280 else if (n == 20)
281 __asm_copy_from_user_20(to, from, ret);
282 else if (n == 24)
283 __asm_copy_from_user_24(to, from, ret);
284 else
285 ret = __generic_copy_from_user(to, from, n);
286
287 return ret;
288}
289
290
291
292static inline unsigned long
293__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
294{
295 unsigned long ret = 0;
296
297 if (n == 0)
298 ;
299 else if (n == 1)
300 __asm_copy_to_user_1(to, from, ret);
301 else if (n == 2)
302 __asm_copy_to_user_2(to, from, ret);
303 else if (n == 3)
304 __asm_copy_to_user_3(to, from, ret);
305 else if (n == 4)
306 __asm_copy_to_user_4(to, from, ret);
307 else if (n == 5)
308 __asm_copy_to_user_5(to, from, ret);
309 else if (n == 6)
310 __asm_copy_to_user_6(to, from, ret);
311 else if (n == 7)
312 __asm_copy_to_user_7(to, from, ret);
313 else if (n == 8)
314 __asm_copy_to_user_8(to, from, ret);
315 else if (n == 9)
316 __asm_copy_to_user_9(to, from, ret);
317 else if (n == 10)
318 __asm_copy_to_user_10(to, from, ret);
319 else if (n == 11)
320 __asm_copy_to_user_11(to, from, ret);
321 else if (n == 12)
322 __asm_copy_to_user_12(to, from, ret);
323 else if (n == 13)
324 __asm_copy_to_user_13(to, from, ret);
325 else if (n == 14)
326 __asm_copy_to_user_14(to, from, ret);
327 else if (n == 15)
328 __asm_copy_to_user_15(to, from, ret);
329 else if (n == 16)
330 __asm_copy_to_user_16(to, from, ret);
331 else if (n == 20)
332 __asm_copy_to_user_20(to, from, ret);
333 else if (n == 24)
334 __asm_copy_to_user_24(to, from, ret);
335 else
336 ret = __generic_copy_to_user(to, from, n);
337
338 return ret;
339}
340
341
342
343static inline unsigned long
344__constant_clear_user(void __user *to, unsigned long n)
345{
346 unsigned long ret = 0;
347
348 if (n == 0)
349 ;
350 else if (n == 1)
351 __asm_clear_1(to, ret);
352 else if (n == 2)
353 __asm_clear_2(to, ret);
354 else if (n == 3)
355 __asm_clear_3(to, ret);
356 else if (n == 4)
357 __asm_clear_4(to, ret);
358 else if (n == 8)
359 __asm_clear_8(to, ret);
360 else if (n == 12)
361 __asm_clear_12(to, ret);
362 else if (n == 16)
363 __asm_clear_16(to, ret);
364 else if (n == 20)
365 __asm_clear_20(to, ret);
366 else if (n == 24)
367 __asm_clear_24(to, ret);
368 else
369 ret = __generic_clear_user(to, n);
370
371 return ret;
372}
373
374
/*
 * Public entry points: pick the open-coded constant-size path when the
 * length is a compile-time constant, otherwise the generic routine.
 * Each returns the number of bytes NOT processed (0 on complete
 * success).
 */
#define clear_user(to, n) \
	(__builtin_constant_p(n) ? \
	 __constant_clear_user(to, n) : \
	 __generic_clear_user(to, n))

#define copy_from_user(to, from, n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_from_user(to, from, n) : \
	 __generic_copy_from_user(to, from, n))

#define copy_to_user(to, from, n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_to_user(to, from, n) : \
	 __generic_copy_to_user(to, from, n))
389
390
391
392
393
/*
 * Unchecked copy from user space: no access_ok() validation, the
 * caller guarantees the range.  NOTE(review): __copy_user_zeroing's
 * name suggests the destination tail is zero-filled on a mid-copy
 * fault -- implemented in the arch library, not visible here.
 */
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void __user *from,
				 unsigned long n)
{
	return __copy_user_zeroing(to, from, n);
}
400
/*
 * Unchecked copy to user space: no access_ok() validation, the caller
 * guarantees the range.
 */
static inline unsigned long
__generic_copy_to_user_nocheck(void __user *to, const void *from,
			       unsigned long n)
{
	return __copy_user(to, from, n);
}
407
/*
 * Unchecked clear of user memory: no access_ok() validation, the
 * caller guarantees the range.
 */
static inline unsigned long
__generic_clear_user_nocheck(void __user *to, unsigned long n)
{
	return __do_clear_user(to, n);
}
413
414
415
/*
 * Unchecked ("__") API: callers must have validated the range with
 * access_ok() themselves.
 */
#define __copy_to_user(to, from, n) \
	__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user(to, from, n) \
	__generic_copy_from_user_nocheck((to), (from), (n))
/* The inatomic variants are the same routines on this port. */
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define __clear_user(to, n) __generic_clear_user_nocheck((to), (n))

/* Bounded strlen; the limit keeps the result within a positive long. */
#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
425
426#endif
427
428#endif
429