/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
        current_thread_info()->addr_limit = fs;

        /*
         * Prevent a mispredicted conditional call to set_fs from forwarding
         * the wrong address limit to access_ok under speculation.
         */
        spec_bar();

        /* On user-mode return, check fs is correct */
        set_thread_flag(TIF_FSCHECK);

        /*
         * Enable/disable UAO so that copy_to_user() etc can access
         * kernel memory with the unprivileged instructions.
         */
        if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
                asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
        else
                asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
                                CONFIG_ARM64_UAO));
}
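
/*
 * Example (sketch, not part of the original header): the traditional
 * get_fs()/set_fs() pattern widens the address limit around a kernel-space
 * access and must always restore it:
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      ret = vfs_read(file, kbuf, len, &pos);  // kbuf is a kernel pointer
 *      set_fs(old_fs);
 *
 * TIF_FSCHECK is set above so a forgotten restore is caught on the way back
 * to userspace.
 */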

#define uaccess_kernel()	(get_fs() == KERNEL_DS)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
        unsigned long ret, limit = current_thread_info()->addr_limit;

        /*
         * Asynchronous I/O running in a kernel thread does not have the
         * TIF_TAGGED_ADDR flag of the process owning the mm, so we ignore
         * it in that case (see do_page_fault()).
         */
        if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
            (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
                addr = untagged_addr(addr);

        __chk_user_ptr(addr);
        asm volatile(
        // A + B <= C + 1 for all A,B,C, in four easy steps:
        // 1: X = A + B; X' = X % 2^64
        " adds %0, %3, %2\n"
        // 2: Set C' = 0 if X > 2^64, to guarantee X' > C'
        " csel %1, xzr, %1, hi\n"
        // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
        //    to compensate for the carry flag being set in step 4. For
        //    X > 2^64, X' merely has to remain nonzero, which it does.
        " csinv %0, %0, xzr, cc\n"
        // 4: For X < 2^64 the carry from step 1 is clear, so this computes
        //    X' - C' - 1 and "ls" accepts X' <= C' + 1. For X >= 2^64,
        //    steps 2 and 3 ensure the test fails unless C' == ~0.
        " sbcs xzr, %0, %1\n"
        " cset %0, ls\n"
        : "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

        return ret;
}
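
/*
 * Worked example (illustration, not from the original source): with
 * addr_limit == USER_DS == TASK_SIZE_64 - 1, addr == TASK_SIZE_64 - 8 and
 * size == 8 give a 65-bit sum equal to limit + 1, so the range ending at
 * the last user byte is accepted; size == 9 is rejected. The 65-bit
 * arithmetic also rejects wraps such as addr == ~0UL, size == 2, where a
 * plain 64-bit add would wrap to 1 and appear in range.
 */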

#define access_ok(addr, size)	__range_ok(addr, size)
#define user_addr_max		get_fs

#define _ASM_EXTABLE(from, to) \
        " .pushsection __ex_table, \"a\"\n" \
        " .align 3\n" \
        " .long (" #from " - .), (" #to " - .)\n" \
        " .popsection\n"

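/*
 * Note (illustration, not from the original source): each __ex_table entry
 * is a pair of 32-bit PC-relative offsets (faulting instruction, fixup).
 * When the instruction at "from" faults, the exception handler looks up the
 * entry and resumes execution at "to" instead of oopsing.
 */
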
/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
        unsigned long flags, ttbr;

        local_irq_save(flags);
        ttbr = read_sysreg(ttbr1_el1);
        ttbr &= ~TTBR_ASID_MASK;
        /* reserved_ttbr0 placed before swapper_pg_dir */
        write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
        isb();
        /* Set reserved ASID */
        write_sysreg(ttbr, ttbr1_el1);
        isb();
        local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
        unsigned long flags, ttbr0, ttbr1;

        /*
         * Disable interrupts to avoid preemption between reading the 'ttbr0'
         * variable and the MSR. A context switch could trigger an ASID
         * roll-over and an update of 'ttbr0'.
         */
        local_irq_save(flags);
        ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

        /* Restore active ASID */
        ttbr1 = read_sysreg(ttbr1_el1);
        ttbr1 &= ~TTBR_ASID_MASK;	/* safety measure */
        ttbr1 |= ttbr0 & TTBR_ASID_MASK;
        write_sysreg(ttbr1, ttbr1_el1);
        isb();

        /* Restore user page table */
        write_sysreg(ttbr0, ttbr0_el1);
        isb();
        local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
        if (!system_uses_ttbr0_pan())
                return false;
        __uaccess_ttbr0_disable();
        return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
        if (!system_uses_ttbr0_pan())
                return false;
        __uaccess_ttbr0_enable();
        return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
        return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
        return false;
}
#endif
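
/*
 * Note (illustration, not from the original source): the software PAN
 * emulation above works by pointing TTBR0_EL1 at a reserved page table
 * (RESERVED_TTBR0_SIZE bytes below swapper_pg_dir) and clearing the ASID,
 * so any user access faults while "disabled"; "enable" reinstalls the user
 * page table and ASID saved in thread_info->ttbr0.
 */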

static inline void __uaccess_disable_hw_pan(void)
{
        asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
                        CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
        asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
                        CONFIG_ARM64_PAN));
}

#define __uaccess_disable(alt) \
do { \
        if (!uaccess_ttbr0_disable()) \
                asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \
                                CONFIG_ARM64_PAN)); \
} while (0)

#define __uaccess_enable(alt) \
do { \
        if (!uaccess_ttbr0_enable()) \
                asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \
                                CONFIG_ARM64_PAN)); \
} while (0)

static inline void uaccess_disable(void)
{
        __uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
        __uaccess_enable(ARM64_HAS_PAN);
}

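/*
 * Illustrative usage (sketch, not from the original source): code that
 * accesses user memory directly must bracket the window:
 *
 *      uaccess_enable();
 *      // ... unprivileged loads/stores to user addresses ...
 *      uaccess_disable();
 *
 * Depending on the CPU, the bracket either toggles PSTATE.PAN or, with
 * CONFIG_ARM64_SW_TTBR0_PAN, switches TTBR0_EL1 as above.
 */
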
/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
        __uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
        __uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit. In case the pointer is tagged (has the top byte set),
 * untag the pointer before checking.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
        void __user *safe_ptr;

        asm volatile(
        " bics xzr, %3, %2\n"
        " csel %0, %1, xzr, eq\n"
        : "=&r" (safe_ptr)
        : "r" (ptr), "r" (current_thread_info()->addr_limit),
          "r" (untagged_addr(ptr))
        : "cc");

        csdb();
        return safe_ptr;
}
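
/*
 * Note (illustration, not from the original source): "bics" computes
 * untagged_addr(ptr) & ~addr_limit and sets Z only when no address bit lies
 * above the limit (both USER_DS and KERNEL_DS are all-ones masks); "csel"
 * then selects the original pointer or NULL, and csdb() prevents a
 * mispredicted comparison from forwarding the unsanitised pointer under
 * speculation.
 */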

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
        asm volatile( \
        "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
                        alt_instr " " reg "1, [%2]\n", feature) \
        "2:\n" \
        " .section .fixup, \"ax\"\n" \
        " .align 2\n" \
        "3: mov %w0, %3\n" \
        " mov %1, #0\n" \
        " b 2b\n" \
        " .previous\n" \
        _ASM_EXTABLE(1b, 3b) \
        : "+r" (err), "=&r" (x) \
        : "r" (addr), "i" (-EFAULT))

#define __raw_get_user(x, ptr, err) \
do { \
        unsigned long __gu_val; \
        __chk_user_ptr(ptr); \
        uaccess_enable_not_uao(); \
        switch (sizeof(*(ptr))) { \
        case 1: \
                __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        case 2: \
                __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        case 4: \
                __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        case 8: \
                __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        uaccess_disable_not_uao(); \
        (x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)

#define __get_user_error(x, ptr, err) \
do { \
        __typeof__(*(ptr)) __user *__p = (ptr); \
        might_fault(); \
        if (access_ok(__p, sizeof(*__p))) { \
                __p = uaccess_mask_ptr(__p); \
                __raw_get_user((x), __p, (err)); \
        } else { \
                (x) = (__force __typeof__(x))0; (err) = -EFAULT; \
        } \
} while (0)

#define __get_user(x, ptr) \
({ \
        int __gu_err = 0; \
        __get_user_error((x), (ptr), __gu_err); \
        __gu_err; \
})

#define get_user	__get_user

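/*
 * Example (sketch, not part of the original header): get_user() returns 0
 * on success and -EFAULT on a faulting access:
 *
 *      u32 val;
 *
 *      if (get_user(val, (u32 __user *)arg))
 *              return -EFAULT;
 */
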
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
        asm volatile( \
        "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
                        alt_instr " " reg "1, [%2]\n", feature) \
        "2:\n" \
        " .section .fixup,\"ax\"\n" \
        " .align 2\n" \
        "3: mov %w0, %3\n" \
        " b 2b\n" \
        " .previous\n" \
        _ASM_EXTABLE(1b, 3b) \
        : "+r" (err) \
        : "r" (x), "r" (addr), "i" (-EFAULT))

#define __raw_put_user(x, ptr, err) \
do { \
        __typeof__(*(ptr)) __pu_val = (x); \
        __chk_user_ptr(ptr); \
        uaccess_enable_not_uao(); \
        switch (sizeof(*(ptr))) { \
        case 1: \
                __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        case 2: \
                __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        case 4: \
                __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        case 8: \
                __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        uaccess_disable_not_uao(); \
} while (0)

#define __put_user_error(x, ptr, err) \
do { \
        __typeof__(*(ptr)) __user *__p = (ptr); \
        might_fault(); \
        if (access_ok(__p, sizeof(*__p))) { \
                __p = uaccess_mask_ptr(__p); \
                __raw_put_user((x), __p, (err)); \
        } else { \
                (err) = -EFAULT; \
        } \
} while (0)

#define __put_user(x, ptr) \
({ \
        int __pu_err = 0; \
        __put_user_error((x), (ptr), __pu_err); \
        __pu_err; \
})

#define put_user	__put_user

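/*
 * Example (sketch, not part of the original header): the store-side
 * counterpart of get_user():
 *
 *      if (put_user(val, (u32 __user *)arg))
 *              return -EFAULT;
 */
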
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n) \
({ \
        unsigned long __acfu_ret; \
        uaccess_enable_not_uao(); \
        __acfu_ret = __arch_copy_from_user((to), \
                                __uaccess_mask_ptr(from), (n)); \
        uaccess_disable_not_uao(); \
        __acfu_ret; \
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n) \
({ \
        unsigned long __actu_ret; \
        uaccess_enable_not_uao(); \
        __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to), \
                                (from), (n)); \
        uaccess_disable_not_uao(); \
        __actu_ret; \
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n) \
({ \
        unsigned long __aciu_ret; \
        uaccess_enable_not_uao(); \
        __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to), \
                                __uaccess_mask_ptr(from), (n)); \
        uaccess_disable_not_uao(); \
        __aciu_ret; \
})
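
/*
 * Example (sketch, not part of the original header): these raw helpers back
 * the generic copy_{from,to,in}_user(). A typical caller of the generic
 * wrapper, which returns the number of bytes left uncopied:
 *
 *      struct foo kbuf;
 *
 *      if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *              return -EFAULT;
 */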

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
        if (access_ok(to, n)) {
                uaccess_enable_not_uao();
                n = __arch_clear_user(__uaccess_mask_ptr(to), n);
                uaccess_disable_not_uao();
        }
        return n;
}
#define clear_user	__clear_user
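
/*
 * Example (sketch, not part of the original header): clear_user() zeroes a
 * user buffer and returns the number of bytes it could not clear, so 0
 * means success:
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;
 */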

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
        kasan_check_write(dst, size);
        return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#endif /* __ASM_UACCESS_H */