/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, NON
 *   INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_UACCESS_H
#define _ASM_TILE_UACCESS_H

/*
 * User space memory access functions.
 */
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)
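
/*
 * Illustrative sketch (not from the original header; the helper name
 * is hypothetical): the historical pattern for temporarily lifting the
 * address limit so that routines taking __user pointers can be aimed
 * at kernel buffers.  The old limit must be restored on every path:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = helper_taking_user_pointers(kbuf);
 *	set_fs(old_fs);
 */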

#ifndef __tilegx__
/*
 * On tilepro, user space may map the MEM_USER_INTRPT window, so a
 * range counts as arch-mappable when it lies entirely inside that
 * window.  The size test is phrased to avoid overflow if addr + size
 * would wrap.
 */
static inline int is_arch_mappable_range(unsigned long addr,
					 unsigned long size)
{
	return (addr >= MEM_USER_INTRPT &&
		addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
		size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
}
#define is_arch_mappable_range is_arch_mappable_range
#else
#define is_arch_mappable_range(addr, size) 0
#endif

/*
 * Note that using this definition ignores is_arch_mappable_range(),
 * so on tilepro code that uses user_addr_max() is constrained not
 * to reference the tilepro user-interrupt region.
 */
#define user_addr_max() (current_thread_info()->addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
int __range_ok(unsigned long addr, unsigned long size);

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) ({ \
	__chk_user_ptr(addr); \
	likely(__range_ok((unsigned long)(addr), (size)) == 0); \
})
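
/*
 * Usage sketch (illustrative; "uarg" and its struct are hypothetical):
 * validate a user range once, after which the __-prefixed accessors
 * defined below may be used on it without further checking.  Note that
 * VERIFY_WRITE is a superset of VERIFY_READ:
 *
 *	if (!access_ok(VERIFY_WRITE, uarg, sizeof(*uarg)))
 *		return -EFAULT;
 */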

#include <asm/extable.h>

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
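
/*
 * For example (illustrative): __inttype(char) is unsigned long, while
 * __inttype(u64) on a 32-bit build is unsigned long long, so casting
 * through __inttype never truncates a 64-bit value.
 */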

/*
 * Support macros for __get_user().
 * Note that __get_user() and __put_user() assume proper alignment.
 */

#ifdef __LP64__
#define _ASM_PTR	".quad"
#define _ASM_ALIGN	".align 8"
#else
#define _ASM_PTR	".long"
#define _ASM_ALIGN	".align 4"
#endif

/*
 * The load at label "1" is paired with the fixup at label "0" via the
 * __ex_table section: if the load faults, the fixup zeroes the
 * destination, sets ret to -EFAULT, and branches to the local label
 * "9" just past the access.
 */
#define __get_user_asm(OP, x, ptr, ret)				\
	asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"	\
		     ".pushsection .fixup,\"ax\"\n"		\
		     "0: { movei %1, 0; movei %0, %3 }\n"	\
		     "j 9f\n"					\
		     ".section __ex_table,\"a\"\n"		\
		     _ASM_ALIGN "\n"				\
		     _ASM_PTR " 1b, 0b\n"			\
		     ".popsection\n"				\
		     "9:"					\
		     : "=r" (ret), "=r" (x)			\
		     : "r" (ptr), "i" (-EFAULT))

#ifdef __tilegx__
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
#else
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
/* Pick which 32-bit word holds the low/high half of a 64-bit value. */
#ifdef __LITTLE_ENDIAN
#define __lo32(a, b) a
#define __hi32(a, b) b
#else
#define __lo32(a, b) b
#define __hi32(a, b) a
#endif
#define __get_user_8(x, ptr, ret)					\
	({								\
		unsigned int __a, __b;					\
		asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n"	\
			     "2: { lw %2, %2; movei %0, 0 }\n"		\
			     ".pushsection .fixup,\"ax\"\n"		\
			     "0: { movei %1, 0; movei %2, 0 }\n"	\
			     "{ movei %0, %4; j 9f }\n"			\
			     ".section __ex_table,\"a\"\n"		\
			     ".align 4\n"				\
			     ".word 1b, 0b\n"				\
			     ".word 2b, 0b\n"				\
			     ".popsection\n"				\
			     "9:"					\
			     : "=r" (ret), "=r" (__a), "=&r" (__b)	\
			     : "r" (ptr), "i" (-EFAULT));		\
		(x) = (__force __typeof(x))(__inttype(x))		\
			(((u64)__hi32(__a, __b) << 32) |		\
			 __lo32(__a, __b));				\
	})
#endif

extern int __get_user_bad(void)
	__attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 */
#define __get_user(x, ptr)						\
	({								\
		int __ret;						\
		typeof(x) _x;						\
		__chk_user_ptr(ptr);					\
		switch (sizeof(*(ptr))) {				\
		case 1: __get_user_1(_x, ptr, __ret); break;		\
		case 2: __get_user_2(_x, ptr, __ret); break;		\
		case 4: __get_user_4(_x, ptr, __ret); break;		\
		case 8: __get_user_8(_x, ptr, __ret); break;		\
		default: __ret = __get_user_bad(); break;		\
		}							\
		(x) = (typeof(*(ptr))) _x;				\
		__ret;							\
	})
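
/*
 * Usage sketch (illustrative; "uptr" and its fields are hypothetical):
 * after access_ok() has validated the whole range, individual fields
 * can be fetched without rechecking:
 *
 *	u32 lo, hi;
 *
 *	if (__get_user(lo, &uptr->lo) || __get_user(hi, &uptr->hi))
 *		return -EFAULT;
 */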

/* Support macros for __put_user(). */

#define __put_user_asm(OP, x, ptr, ret)				\
	asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"	\
		     ".pushsection .fixup,\"ax\"\n"		\
		     "0: { movei %0, %3; j 9f }\n"		\
		     ".section __ex_table,\"a\"\n"		\
		     _ASM_ALIGN "\n"				\
		     _ASM_PTR " 1b, 0b\n"			\
		     ".popsection\n"				\
		     "9:"					\
		     : "=r" (ret)				\
		     : "r" (ptr), "r" (x), "i" (-EFAULT))

#ifdef __tilegx__
#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
#else
#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
#define __put_user_8(x, ptr, ret)					\
	({								\
		u64 __x = (__force __inttype(x))(x);			\
		int __lo = (int) __x, __hi = (int) (__x >> 32);		\
		asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n"	\
			     "2: { sw %0, %3; movei %0, 0 }\n"		\
			     ".pushsection .fixup,\"ax\"\n"		\
			     "0: { movei %0, %4; j 9f }\n"		\
			     ".section __ex_table,\"a\"\n"		\
			     ".align 4\n"				\
			     ".word 1b, 0b\n"				\
			     ".word 2b, 0b\n"				\
			     ".popsection\n"				\
			     "9:"					\
			     : "=&r" (ret)				\
			     : "r" (ptr), "r" (__lo32(__lo, __hi)),	\
			       "r" (__hi32(__lo, __hi)), "i" (-EFAULT)); \
	})
#endif

extern int __put_user_bad(void)
	__attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be
 * assignable to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
({									\
	int __ret;							\
	typeof(*(ptr)) _x = (x);					\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: __put_user_1(_x, ptr, __ret); break;			\
	case 2: __put_user_2(_x, ptr, __ret); break;			\
	case 4: __put_user_4(_x, ptr, __ret); break;			\
	case 8: __put_user_8(_x, ptr, __ret); break;			\
	default: __ret = __put_user_bad(); break;			\
	}								\
	__ret;								\
})

/*
 * The versions of get_user and put_user without initial underscores
 * check the address of their arguments to make sure they are not
 * in kernel space.
 */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__Pu_addr = (ptr);			\
	access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?	\
		__put_user((x), (__Pu_addr)) :				\
		-EFAULT;						\
})

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) const __user *__Gu_addr = (ptr);		\
	access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?	\
		__get_user((x), (__Gu_addr)) :				\
		((x) = 0, -EFAULT);					\
})
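
/*
 * Usage sketch (illustrative; "ucnt" is a hypothetical int __user *):
 * the checked variants combine access_ok() with the access itself, so
 * a bad pointer simply yields -EFAULT:
 *
 *	int v;
 *
 *	if (get_user(v, ucnt))
 *		return -EFAULT;
 *	if (put_user(v + 1, ucnt))
 *		return -EFAULT;
 */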

extern unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

#ifdef __tilegx__
extern unsigned long raw_copy_in_user(
	void __user *to, const void __user *from, unsigned long n);
#endif
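
/*
 * Usage sketch (illustrative; "req" and "ubuf" are hypothetical): the
 * generic copy_from_user()/copy_to_user() wrappers in <linux/uaccess.h>
 * are built on the raw routines above and return the number of bytes
 * left uncopied, so zero means success:
 *
 *	struct req r;
 *
 *	if (copy_from_user(&r, ubuf, sizeof(r)))
 *		return -EFAULT;
 */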

extern long strnlen_user(const char __user *str, long n);
extern long strncpy_from_user(char *dst, const char __user *src, long n);
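
/*
 * Usage sketch (illustrative; "uname" is a hypothetical user pointer):
 * strncpy_from_user() returns the string length on success, -EFAULT on
 * a bad address, or n if the destination filled up without a NUL:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */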

/**
 * clear_user: - Zero a block of memory in user space.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __clear_user(
	void __user *mem, unsigned long len)
{
	might_fault();
	return clear_user_asm(mem, len);
}
static inline unsigned long __must_check clear_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __clear_user(mem, len);
	return len;
}
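
/*
 * Usage sketch (illustrative; "ubuf" and "len" are hypothetical): zero
 * a user buffer, treating any shortfall as a fault:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */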

/**
 * flush_user: - Flush a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to flush.
 *
 * Returns number of bytes that could not be flushed.
 * On success, this will be zero.
 */
extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __flush_user(
	void __user *mem, unsigned long len)
{
	int retval;

	might_fault();
	retval = flush_user_asm(mem, len);
	mb_incoherent();
	return retval;
}

static inline unsigned long __must_check flush_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __flush_user(mem, len);
	return len;
}

/**
 * finv_user: - Flush-inval a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be flush-invalidated.
 * On success, this will be zero.
 */
extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __finv_user(
	void __user *mem, unsigned long len)
{
	int retval;

	might_fault();
	retval = finv_user_asm(mem, len);
	mb_incoherent();
	return retval;
}
static inline unsigned long __must_check finv_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __finv_user(mem, len);
	return len;
}
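
/*
 * Usage sketch (illustrative; "ubuf" and "len" are hypothetical): a
 * driver handing user memory to a non-coherent device might push dirty
 * cache lines to memory first, treating any unflushed remainder as a
 * fault:
 *
 *	if (flush_user(ubuf, len))
 *		return -EFAULT;
 */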

#endif /* _ASM_TILE_UACCESS_H */