/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * The fs/ds values are the highest legal address in the "segment", which
 * simplifies the range checks in the routines below.
 */

#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })

#define KERNEL_DS MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* TASK_SIZE_USER64 is used because TASK_SIZE is not a constant on 64-bit */
#define USER_DS MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_fs() (current->thread.addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* Have the exit-to-user path verify that addr_limit (fs) is sane */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b) ((a).seg == (b).seg)

#define user_addr_max() (get_fs().seg)
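/*
 * Example (sketch, not from this file): the legacy pattern for temporarily
 * widening the limit so a kernel buffer can be passed through code that
 * expects a user pointer.  KERNEL_DS disables the access_ok() range check,
 * so the window must be kept as small as possible.  kbuf and
 * some_helper_that_takes_a_user_pointer() are placeholders:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = some_helper_that_takes_a_user_pointer((void __user *)kbuf);
 *	set_fs(old_fs);
 */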

#ifdef __powerpc64__
/*
 * This simple check is sufficient on 64-bit because there is a large
 * enough gap between user addresses and kernel addresses: even the
 * maximal addr + size stays well below the start of the kernel range.
 */
#define __access_ok(addr, size, segment) \
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

static inline int __access_ok(unsigned long addr, unsigned long size,
			mm_segment_t seg)
{
	if (addr > seg.seg)
		return 0;
	return (size == 0 || size - 1 <= seg.seg - addr);
}

#endif

#define access_ok(addr, size) \
	(__chk_user_ptr(addr), \
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
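/*
 * Example (sketch): callers that touch the same user area several times can
 * validate the range once with access_ok() and then use the "__" variants,
 * which skip the check.  uptr and val below are placeholders, not names
 * used in this file:
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 */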

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right access size if we just have the right pointer type.
 *
 * get_user()/put_user() return 0 on success and -EFAULT on failure; the
 * value read by get_user() is delivered through the first argument.
 *
 * The "__" versions do not verify the address range; that must have been
 * done previously by the caller with a separate access_ok() call (this is
 * used when the same area of user memory is accessed multiple times).
 *
 * The "_allowed" versions additionally assume the caller has already
 * opened a user access window (see user_access_begin() below), and the
 * "_inatomic" versions are for contexts that must not sleep.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)

#define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
#define __put_user_allowed(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
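/*
 * Example (sketch): a typical checked single-value round trip, e.g. in a
 * hypothetical ioctl handler.  Both macros perform access_ok() themselves.
 * SOME_FLAG and arg are placeholders for illustration only:
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)arg))
 *		return -EFAULT;
 *	val |= SOME_FLAG;
 *	if (put_user(val, (u32 __user *)arg))
 *		return -EFAULT;
 */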

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
		"1: " op " %1,0(%2) # put_user\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: li %0,%3\n" \
		" b 2b\n" \
		".previous\n" \
		EX_TABLE(1b, 3b) \
		: "=r" (err) \
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval) \
	__put_user_asm(x, ptr, retval, "std")
#else
#define __put_user_asm2(x, addr, err) \
	__asm__ __volatile__( \
		"1: stw %1,0(%2)\n" \
		"2: stw %1+1,4(%2)\n" \
		"3:\n" \
		".section .fixup,\"ax\"\n" \
		"4: li %0,%3\n" \
		" b 3b\n" \
		".previous\n" \
		EX_TABLE(1b, 4b) \
		EX_TABLE(2b, 4b) \
		: "=r" (err) \
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif

#define __put_user_size_allowed(x, ptr, size, retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
	case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
	case 4: __put_user_asm(x, ptr, retval, "stw"); break; \
	case 8: __put_user_asm2(x, ptr, retval); break; \
	default: __put_user_bad(); \
	} \
} while (0)

#define __put_user_size(x, ptr, size, retval) \
do { \
	allow_write_to_user(ptr, size); \
	__put_user_size_allowed(x, ptr, size, retval); \
	prevent_write_to_user(ptr, size); \
} while (0)
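/*
 * allow_write_to_user()/prevent_write_to_user() (from <asm/kup.h>) open and
 * close the hardware user-access window (KUAP) around the store, so user
 * memory is only writable by the kernel for the duration of the access.
 * The "_allowed" variant above is for callers that have already opened the
 * window themselves.
 */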

#define __put_user_nocheck(x, ptr, size, do_allow) \
({ \
	long __pu_err; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	__typeof__(size) __pu_size = (size); \
	\
	if (!is_kernel_addr((unsigned long)__pu_addr)) \
		might_fault(); \
	__chk_user_ptr(__pu_addr); \
	if (do_allow) \
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
	else \
		__put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \
	\
	__pu_err; \
})

#define __put_user_check(x, ptr, size) \
({ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	__typeof__(size) __pu_size = (size); \
	\
	might_fault(); \
	if (access_ok(__pu_addr, __pu_size)) \
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
	\
	__pu_err; \
})

#define __put_user_nosleep(x, ptr, size) \
({ \
	long __pu_err; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	__typeof__(size) __pu_size = (size); \
	\
	__chk_user_ptr(__pu_addr); \
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
	\
	__pu_err; \
})

extern long __get_user_bad(void);

/*
 * This does an atomic 128-bit (16-byte) aligned load from userspace into a
 * kernel buffer, using the VMX lvx/stvx instructions.  The caller must have
 * enabled kernel VMX/Altivec use beforehand.
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err) \
	__asm__ __volatile__( \
		"1: lvx 0,0,%1 # get user\n" \
		" stvx 0,0,%2 # put kernel\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: li %0,%3\n" \
		" b 2b\n" \
		".previous\n" \
		EX_TABLE(1b, 3b) \
		: "=r" (err) \
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#define __get_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
		"1: "op" %1,0(%2) # get_user\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: li %0,%3\n" \
		" li %1,0\n" \
		" b 2b\n" \
		".previous\n" \
		EX_TABLE(1b, 3b) \
		: "=r" (err), "=r" (x) \
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err) \
	__get_user_asm(x, addr, err, "ld")
#else
#define __get_user_asm2(x, addr, err) \
	__asm__ __volatile__( \
		"1: lwz %1,0(%2)\n" \
		"2: lwz %1+1,4(%2)\n" \
		"3:\n" \
		".section .fixup,\"ax\"\n" \
		"4: li %0,%3\n" \
		" li %1,0\n" \
		" li %1+1,0\n" \
		" b 3b\n" \
		".previous\n" \
		EX_TABLE(1b, 4b) \
		EX_TABLE(2b, 4b) \
		: "=r" (err), "=&r" (x) \
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif
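/*
 * Unlike the put_user fixups, the get_user fixup paths also zero the
 * destination register(s) (the "li %1,0" in the .fixup section), so a
 * failed read never leaves stale or uninitialized data in the result.
 */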

#define __get_user_size_allowed(x, ptr, size, retval) \
do { \
	retval = 0; \
	__chk_user_ptr(ptr); \
	if (size > sizeof(x)) \
		(x) = __get_user_bad(); \
	switch (size) { \
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break; \
	case 8: __get_user_asm2(x, ptr, retval); break; \
	default: (x) = __get_user_bad(); \
	} \
} while (0)

#define __get_user_size(x, ptr, size, retval) \
do { \
	allow_read_from_user(ptr, size); \
	__get_user_size_allowed(x, ptr, size, retval); \
	prevent_read_from_user(ptr, size); \
} while (0)

/*
 * This is the type of the temporary (__gu_val) used by the __get_user*()
 * macros below: unsigned long if the value fits, otherwise unsigned long
 * long, so that 8-byte reads are not truncated on 32-bit kernels.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
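/*
 * For example (sketch): with a u64 __user *p on a 32-bit kernel,
 * __long_type(*p) is unsigned long long, so the temporary filled by
 * __get_user_asm2() holds all 64 bits before the final assignment back to
 * the caller's variable.
 */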

#define __get_user_nocheck(x, ptr, size, do_allow) \
({ \
	long __gu_err; \
	__long_type(*(ptr)) __gu_val; \
	__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	__typeof__(size) __gu_size = (size); \
	\
	__chk_user_ptr(__gu_addr); \
	if (!is_kernel_addr((unsigned long)__gu_addr)) \
		might_fault(); \
	barrier_nospec(); \
	if (do_allow) \
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	else \
		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	\
	__gu_err; \
})

#define __get_user_check(x, ptr, size) \
({ \
	long __gu_err = -EFAULT; \
	__long_type(*(ptr)) __gu_val = 0; \
	__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	__typeof__(size) __gu_size = (size); \
	\
	might_fault(); \
	if (access_ok(__gu_addr, __gu_size)) { \
		barrier_nospec(); \
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	} \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	\
	__gu_err; \
})

#define __get_user_nosleep(x, ptr, size) \
({ \
	long __gu_err; \
	__long_type(*(ptr)) __gu_val; \
	__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	__typeof__(size) __gu_size = (size); \
	\
	__chk_user_ptr(__gu_addr); \
	barrier_nospec(); \
	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	\
	__gu_err; \
})
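/*
 * Note on barrier_nospec(): it is issued after the address check (and
 * before the actual load) so that the user pointer cannot be dereferenced
 * speculatively with a not-yet-validated value, as a Spectre-v1 style
 * mitigation.
 */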

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	barrier_nospec();
	allow_read_write_user(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_read_write_user(to, from, n);
	return ret;
}
#endif

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;

	if (__builtin_constant_p(n) && (n <= 8)) {
		ret = 1;

		switch (n) {
		case 1:
			barrier_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			barrier_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			barrier_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			barrier_nospec();
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	barrier_nospec();
	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}
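/*
 * The constant-size special case above (and its mirror in
 * raw_copy_to_user_allowed() below) turns copies of 1/2/4/8 bytes with a
 * compile-time-constant length into a single inline user access instead of
 * a call to the out-of-line __copy_tofrom_user() routine.
 */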

static inline unsigned long
raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_write_to_user(to, n);
	ret = raw_copy_to_user_allowed(to, from, n);
	prevent_write_to_user(to, n);
	return ret;
}

static __always_inline unsigned long __must_check
copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true))) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = memcpy_mcsafe((void *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}
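/*
 * Like the regular copy routines, copy_to_user_mcsafe() returns the number
 * of bytes not copied: memcpy_mcsafe() stops at a machine check (e.g. on
 * poisoned memory) and reports how many bytes remained.
 */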

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;
	might_fault();
	if (likely(access_ok(addr, size))) {
		allow_write_to_user(addr, size);
		ret = __arch_clear_user(addr, size);
		prevent_write_to_user(addr, size);
	}
	return ret;
}

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
}
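/*
 * Example (sketch): zeroing the unused tail of a user buffer after a short
 * read, where buf (a char __user *), copied and len are placeholders:
 *
 *	if (copied < len && clear_user(buf + copied, len - copied))
 *		return -EFAULT;
 *
 * clear_user() returns the number of bytes that could not be cleared.
 */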

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len);

static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;
	allow_read_write_user((void __user *)ptr, ptr, len);
	return true;
}
#define user_access_begin user_access_begin
#define user_access_end prevent_current_access_user
#define user_access_save prevent_user_access_return
#define user_access_restore restore_user_access

#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
#define unsafe_copy_to_user(d, s, l, e) \
	unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
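/*
 * Example (sketch): batching several accesses inside one open/close of the
 * user access window.  The unsafe_*() helpers skip the per-access window
 * handling and jump to the given label on fault.  uptr, a and b are
 * placeholders, not names used in this file:
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_put_user(a, &uptr[0], efault);
 *	unsafe_put_user(b, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 *
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */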

#endif	/* _ARCH_POWERPC_UACCESS_H */