1
2
3
4
5
6
7
8
9
10
11#ifndef _ASM_UACCESS_H
12#define _ASM_UACCESS_H
13
14
15
16
17#include <linux/thread_info.h>
18#include <asm/page.h>
19#include <asm/errno.h>
20
/* access_ok() direction flags (not used by this implementation — see
 * access_ok() below, which ignores its type argument) */
#define VERIFY_READ 0
#define VERIFY_WRITE 1

/* Build an mm_segment_t holding a raw upper-address-limit value. */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

/*
 * Segment limits installed via set_fs().
 * NOTE(review): 0xBFFFFFFF / 0x9FFFFFFF presumably mark the tops of the
 * extended-kernel and kernel data regions in this platform's memory map
 * — confirm against the architecture documentation.
 */
#define KERNEL_XDS	MAKE_MM_SEG(0xBFFFFFFF)
#define KERNEL_DS	MAKE_MM_SEG(0x9FFFFFFF)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_ds()	(KERNEL_DS)
/* the current address limit is kept per-thread in thread_info */
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
/* true iff the current address limit is exactly KERNEL_DS */
#define __kernel_ds_p()	(current_thread_info()->addr_limit.seg == 0x9FFFFFFF)

#define segment_eq(a, b)	((a).seg == (b).seg)

/* single-address check: strictly below the current segment limit
 * (note: ___range_ok() below allows end == limit; this one does not) */
#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
46
47
48
49
/*
 * Test whether the byte range [addr, addr + size) is usable under the
 * current addr_limit.
 *
 * Returns 0 when the range is acceptable and 1 when it is not: @flag
 * starts at 1 and the asm clears it only when the addition neither
 * carries (wraps past 4GiB) nor yields an end address above the
 * segment limit; on either failure it branches past the clr, leaving
 * flag == 1.
 */
static inline int ___range_ok(unsigned long addr, unsigned int size)
{
	int flag = 1, tmp;

	asm(" add %3,%1 \n"	/* tmp = addr + size; carry set on wrap */
	    " bcs 0f \n"	/* wrapped past 4GiB -> fail */
	    " cmp %4,%1 \n"	/* compare end address against addr_limit */
	    " bhi 0f \n"	/* end > limit -> fail */
	    " clr %0 \n"	/* in range: flag = 0 */
	    "0: \n"
	    : "=r"(flag), "=&r"(tmp)
	    : "1"(addr), "ir"(size),
	    "r"(current_thread_info()->addr_limit.seg), "0"(flag)
	    : "cc"
	    );

	return flag;
}
68
/* normalize the address/size types before the real range check;
 * evaluates to 0 when the range is OK, non-zero otherwise */
#define __range_ok(addr, size) ___range_ok((unsigned long)(addr), (u32)(size))

/* NOTE: the "type" argument (VERIFY_READ/VERIFY_WRITE) is ignored —
 * the same range check is applied in both directions */
#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size)     (__range_ok((addr), (size)) == 0)
73
74static inline int verify_area(int type, const void *addr, unsigned long size)
75{
76 return access_ok(type, addr, size) ? 0 : -EFAULT;
77}
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
/*
 * Exception table entry: when a fault occurs at address "insn", the
 * fault handler transfers control to "fixup".  Entries are emitted
 * into the __ex_table section by the accessor macros below.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* attempt to recover from a kernel-mode fault using the table above */
extern int fixup_exception(struct pt_regs *regs);

/* checked single-value transfers; access width inferred from the
 * pointer's target type */
#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))

/* unchecked variants: the caller must already have validated the
 * address (e.g. via access_ok()) */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * Legacy helpers that "return (ret)" from the *enclosing function* on
 * fault.  The hidden return makes these easy to misuse; prefer the
 * plain put_user()/get_user() forms.
 */
#define put_user_ret(x, ptr, ret) \
	({ if (put_user((x), (ptr))) return (ret); })
#define get_user_ret(x, ptr, ret) \
	({ if (get_user((x), (ptr))) return (ret); })
#define __put_user_ret(x, ptr, ret) \
	({ if (__put_user((x), (ptr))) return (ret); })
#define __get_user_ret(x, ptr, ret) \
	({ if (__get_user((x), (ptr))) return (ret); })

/* wrap a user address in an oversized dummy struct so it can be fed
 * to an "m" asm constraint without fixing the access size */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
129
/*
 * Fetch a 1-, 2- or 4-byte value from user memory with no access
 * check.  Evaluates to __gu_err: 0 on success, -EFAULT on fault (set
 * by the __get_user_asm fixup; __gu_val — and hence x — is left
 * indeterminate on fault, the stub does not clear it).  Any other
 * size references the undefined __get_user_unknown(), turning a bad
 * size into a link-time error; __gu_err is never assigned on that
 * (never-linked) path.
 */
#define __get_user_nocheck(x, ptr, size) \
({ \
	unsigned long __gu_addr; \
	int __gu_err; \
	__gu_addr = (unsigned long) (ptr); \
	switch (size) { \
	case 1: { \
		unsigned char __gu_val; \
		__get_user_asm("bu"); \
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val; \
		break; \
	} \
	case 2: { \
		unsigned short __gu_val; \
		__get_user_asm("hu"); \
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val; \
		break; \
	} \
	case 4: { \
		unsigned int __gu_val; \
		__get_user_asm(""); \
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val; \
		break; \
	} \
	default: \
		__get_user_unknown(); \
		break; \
	} \
	__gu_err; \
})
160
/*
 * Checked get_user(): evaluates ptr exactly once (via __guc_ptr),
 * verifies the range, then delegates to __get_user_nocheck().  When
 * the range check fails, x is zeroed and -EFAULT is returned without
 * touching user memory.
 */
#define __get_user_check(x, ptr, size) \
({ \
	const __typeof__(ptr) __guc_ptr = (ptr); \
	int _e; \
	if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
		_e = __get_user_nocheck((x), __guc_ptr, (size)); \
	else { \
		_e = -EFAULT; \
		(x) = (__typeof__(x))0; \
	} \
	_e; \
})
173
/*
 * Emit a single load from user space ("mov" plus the INSN width
 * suffix, e.g. "bu"/"hu"/"").  Relies on __gu_addr, __gu_val and
 * __gu_err being in scope (provided by __get_user_nocheck).  A fault
 * at label 1 is routed, via the __ex_table entry, to the .fixup stub
 * at label 3, which stores -EFAULT in __gu_err and resumes after the
 * access at label 2; the success path clears __gu_err to 0.
 */
#define __get_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%2,%1\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"3:\n\t"					\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign 4\n"				\
		"	.long 1b, 3b\n"				\
		"	.previous"				\
		: "=&r" (__gu_err), "=&r" (__gu_val)		\
		: "m" (__m(__gu_addr)), "i" (-EFAULT));		\
})

/* deliberately undefined: referenced for unsupported access sizes so
 * misuse fails at link time */
extern int __get_user_unknown(void);
195
/*
 * Store a 1-, 2-, 4- or 8-byte value to user memory with no access
 * check.  The value is staged through a union so the 8-byte case can
 * be issued as two 32-bit halves (bits[0]/bits[1], see
 * __put_user_asm8).  Evaluates to __pu_err: 0 on success, -EFAULT on
 * fault; other sizes reference the undefined __put_user_unknown()
 * and so fail at link time.
 */
#define __put_user_nocheck(x, ptr, size)			\
({								\
	union {							\
		__typeof__(*(ptr)) val;				\
		u32 bits[2];					\
	} __pu_val;						\
	unsigned long __pu_addr;				\
	int __pu_err;						\
	__pu_val.val = (x);					\
	__pu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1:  __put_user_asm("bu"); break;			\
	case 2:  __put_user_asm("hu"); break;			\
	case 4:  __put_user_asm(""  ); break;			\
	case 8:  __put_user_asm8();    break;			\
	default: __pu_err = __put_user_unknown(); break;	\
	}							\
	__pu_err;						\
})
215
/*
 * Checked put_user(): same staging and dispatch as
 * __put_user_nocheck, but the store is only attempted when the range
 * check passes; otherwise -EFAULT is returned without touching user
 * memory.  Note ptr is evaluated more than once here (unlike
 * __get_user_check, which caches it).
 */
#define __put_user_check(x, ptr, size)				\
({								\
	union {							\
		__typeof__(*(ptr)) val;				\
		u32 bits[2];					\
	} __pu_val;						\
	unsigned long __pu_addr;				\
	int __pu_err;						\
	__pu_val.val = (x);					\
	__pu_addr = (unsigned long) (ptr);			\
	if (likely(__access_ok(__pu_addr, size))) {		\
		switch (size) {					\
		case 1:  __put_user_asm("bu"); break;		\
		case 2:  __put_user_asm("hu"); break;		\
		case 4:  __put_user_asm(""  ); break;		\
		case 8:  __put_user_asm8();    break;		\
		default: __pu_err = __put_user_unknown(); break; \
		}						\
	}							\
	else {							\
		__pu_err = -EFAULT;				\
	}							\
	__pu_err;						\
})
240
/*
 * Emit a single store to user space ("mov" plus the INSN width
 * suffix).  Relies on __pu_addr, __pu_val and __pu_err being in
 * scope (provided by the __put_user_* macros).  A fault at label 1
 * is routed via __ex_table to the .fixup stub at 3, which sets
 * __pu_err to -EFAULT and resumes at 2; the success path clears
 * __pu_err to 0.
 */
#define __put_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%1,%2\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"3:\n"						\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign 4\n"				\
		"	.long 1b, 3b\n"				\
		"	.previous"				\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.val), "m" (__m(__pu_addr)),	\
		  "i" (-EFAULT)					\
		);						\
})
262
/*
 * 64-bit store issued as two 32-bit moves: bits[0] -> __pu_addr and
 * bits[1] -> __pu_addr + 4.  NOTE(review): this assumes bits[0] of
 * the staging union is the half belonging at the lower address
 * (little-endian layout) — confirm for this architecture.  A fault
 * in either store branches to the shared fixup at 4, which sets
 * -EFAULT and skips any remaining store.
 */
#define __put_user_asm8()					\
({								\
	asm volatile(						\
		"1:	mov	%1,%3		\n"		\
		"2:	mov	%2,%4		\n"		\
		"	mov	0,%0		\n"		\
		"3:				\n"		\
		"	.section .fixup,\"ax\"	\n"		\
		"4:				\n"		\
		"	mov	%5,%0		\n"		\
		"	jmp	3b		\n"		\
		"	.previous		\n"		\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign	4		\n"		\
		"	.long	1b, 4b		\n"		\
		"	.long	2b, 4b		\n"		\
		"	.previous		\n"		\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.bits[0]), "r" (__pu_val.bits[1]), \
		  "m" (__m(__pu_addr)), "m" (__m(__pu_addr+4)),	\
		  "i" (-EFAULT)					\
		);						\
})

/* deliberately undefined: referenced for unsupported access sizes so
 * misuse fails at link time */
extern int __put_user_unknown(void);
288
289
290
291
292
293
/*
 * Copy "size" bytes from "from" to "to" one byte at a time, with
 * fault fixup.  On a fault the fixup simply jumps past the loop, so
 * "size" is left holding the number of bytes NOT copied (0 on full
 * success) — callers return it as the residue.  The "a" constraints
 * put the pointers in address registers.  The loop is bottom-tested
 * (add -1 / bne), hence the "if (size)" guard: entering with size 0
 * would otherwise wrap and copy (almost) 4GiB.
 */
#define __copy_user(to, from, size)				\
do {								\
	if (size) {						\
		void *__to = to;				\
		const void *__from = from;			\
		int w;						\
		asm volatile(					\
			"0:     movbu	(%0),%3;\n"		\
			"1:     movbu	%3,(%1);\n"		\
			"	inc	%0;\n"			\
			"	inc	%1;\n"			\
			"       add	-1,%2;\n"		\
			"       bne	0b;\n"			\
			"2:\n"					\
			"	.section .fixup,\"ax\"\n"	\
			"3:	jmp	2b\n"			\
			"	.previous\n"			\
			"	.section __ex_table,\"a\"\n"	\
			"       .balign	4\n"			\
			"       .long	0b,3b\n"		\
			"       .long	1b,3b\n"		\
			"	.previous\n"			\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)	\
			: "cc", "memory");			\
	}							\
} while (0)
321
/*
 * Like __copy_user, but on a fault the remaining destination bytes
 * are zero-filled (required by copy_from_user semantics so kernel
 * buffers never keep stale contents).  The fixup at 3 stashes the
 * remaining count in %0, clears the work register, zero-fills the
 * rest of the destination in the loop at 4, then restores the saved
 * count into "size" so the caller still sees how many bytes were not
 * copied from user space.
 */
#define __copy_user_zeroing(to, from, size)			\
do {								\
	if (size) {						\
		void *__to = to;				\
		const void *__from = from;			\
		int w;						\
		asm volatile(					\
			"0:     movbu	(%0),%3;\n"		\
			"1:     movbu	%3,(%1);\n"		\
			"	inc	%0;\n"			\
			"	inc	%1;\n"			\
			"       add	-1,%2;\n"		\
			"       bne	0b;\n"			\
			"2:\n"					\
			"	.section .fixup,\"ax\"\n"	\
			"3:\n"					\
			"	mov	%2,%0\n"		\
			"	clr	%3\n"			\
			"4:     movbu	%3,(%1);\n"		\
			"	inc	%1;\n"			\
			"       add	-1,%2;\n"		\
			"       bne	4b;\n"			\
			"	mov	%0,%2\n"		\
			"	jmp	2b\n"			\
			"	.previous\n"			\
			"	.section __ex_table,\"a\"\n"	\
			"       .balign	4\n"			\
			"       .long	0b,3b\n"		\
			"       .long	1b,3b\n"		\
			"	.previous\n"			\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)	\
			: "cc", "memory");			\
	}							\
} while (0)
357
358
359
360
/*
 * Copy n bytes from user space with no access check.  Returns the
 * number of bytes that could NOT be copied (0 on complete success);
 * on a fault the uncopied tail of the kernel buffer is zero-filled
 * by __copy_user_zeroing.
 */
static inline
unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
					       unsigned long n)
{
	__copy_user_zeroing(to, from, n);
	return n;
}
368
/*
 * Copy n bytes to user space with no access check.  Returns the
 * number of bytes that could NOT be copied (0 on complete success);
 * no zero-fill is needed in this direction.
 */
static inline
unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
					     unsigned long n)
{
	__copy_user(to, from, n);
	return n;
}
376
377
#if 0	/* dead code: constant-size copy variants, deliberately disabled
	 * because the asm loops never advance the to/from pointers (see
	 * the #error).  Kept for reference only. */
#error "don't use - these macros don't increment to & from pointers"

#define __constant_copy_user(to, from, size)	\
do {						\
	asm volatile(				\
		"       mov %0,a0;\n"		\
		"0:     movbu (%1),d3;\n"	\
		"1:     movbu d3,(%2);\n"	\
		"	add -1,a0;\n"		\
		"	bne 0b;\n"		\
		"2:;"				\
		".section .fixup,\"ax\"\n"	\
		"3:	jmp 2b\n"		\
		".previous\n"			\
		".section __ex_table,\"a\"\n"	\
		"       .balign 4\n"		\
		"       .long 0b,3b\n"		\
		"       .long 1b,3b\n"		\
		".previous"			\
		:				\
		: "d"(size), "d"(to), "d"(from)	\
		: "d3", "a0");			\
} while (0)

/* as above, but with zero-fill-on-fault semantics intended (never
 * implemented: the fixup does not zero anything either) */
#define __constant_copy_user_zeroing(to, from, size)	\
do {							\
	asm volatile(					\
		"       mov %0,a0;\n"			\
		"0:     movbu (%1),d3;\n"		\
		"1:     movbu d3,(%2);\n"		\
		"	add -1,a0;\n"			\
		"	bne 0b;\n"			\
		"2:;"					\
		".section .fixup,\"ax\"\n"		\
		"3:	jmp 2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"       .balign 4\n"			\
		"       .long 0b,3b\n"			\
		"       .long 1b,3b\n"			\
		".previous"				\
		:					\
		: "d"(size), "d"(to), "d"(from)		\
		: "d3", "a0");				\
} while (0)

static inline
unsigned long __constant_copy_to_user(void *to, const void *from,
				      unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user(void *to, const void *from,
					unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		__constant_copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
					      unsigned long n)
{
	__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
					        unsigned long n)
{
	__constant_copy_user_zeroing(to, from, n);
	return n;
}
#endif
460
/* checked bulk copies, implemented out of line; both return the
 * number of bytes not copied (0 on success) */
extern unsigned long __generic_copy_to_user(void __user *, const void *,
					    unsigned long);
extern unsigned long __generic_copy_from_user(void *, const void __user *,
					      unsigned long);

/* unchecked copies usable from atomic context (no might_sleep()) */
#define __copy_to_user_inatomic(to, from, n) \
	__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
	__generic_copy_from_user_nocheck((to), (from), (n))

/* unchecked copies for sleepable context; might_sleep() catches
 * callers running atomically in debug builds */
#define __copy_to_user(to, from, n)			\
({							\
	might_sleep();					\
	__copy_to_user_inatomic((to), (from), (n));	\
})

#define __copy_from_user(to, from, n)			\
({							\
	might_sleep();					\
	__copy_from_user_inatomic((to), (from), (n));	\
})
483
484#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
485#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
486
487extern long strncpy_from_user(char *dst, const char __user *src, long count);
488extern long __strncpy_from_user(char *dst, const char __user *src, long count);
489extern long strnlen_user(const char __user *str, long n);
490#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
491extern unsigned long clear_user(void __user *mem, unsigned long len);
492extern unsigned long __clear_user(void __user *mem, unsigned long len);
493
494#endif
495