/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	spec_bar();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}
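
/*
 * Illustrative sketch only (not part of the original header): callers of
 * get_fs()/set_fs() follow the classic save/override/restore pattern so
 * that the user-copy helpers temporarily accept kernel pointers. The
 * helper name below is made up for the example.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = do_copy_with_user_helpers();	// may now pass kernel pointers
 *	set_fs(old_fs);
 */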

#define segment_eq(a, b)	((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = current_thread_info()->addr_limit;

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}
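
/*
 * Worked example (illustrative, with a made-up inclusive limit): take
 * addr_limit == 0x0000ffffffffffff. For addr == 0x0000fffffffff000 and
 * size == 0x1000, the 65-bit sum equals addr_limit + 1, so the test above
 * passes and __range_ok() returns 1. Bump size to 0x1001 and the sum
 * exceeds addr_limit + 1, so it returns 0. An overflowing addr + size
 * (e.g. addr == ~0UL, size == 2) sets the carry in step 1; steps 2 and 3
 * then pit a nonzero X' against C == 0, and the result is again 0.
 */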

#define access_ok(addr, size)	__range_ok(addr, size)
#define user_addr_max		get_fs

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif
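
/*
 * Descriptive note (added for clarity): under CONFIG_ARM64_SW_TTBR0_PAN,
 * "disabled" means TTBR0_EL1 points at the all-zero reserved table placed
 * RESERVED_TTBR0_SIZE bytes before swapper_pg_dir, so any kernel access
 * through a user VA faults; "enabled" reinstates the user page table (and
 * its ASID) saved in thread_info->ttbr0.
 */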

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}
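
/*
 * Illustrative pairing (a sketch, mirroring how the get/put helpers below
 * use these functions): enable around the user access, disable afterwards.
 *
 *	uaccess_enable_not_uao();
 *	... ldtr/sttr (or ldr/str under UAO) accesses to user memory ...
 *	uaccess_disable_not_uao();
 */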

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %1, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
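
/*
 * Worked example (illustrative): addr_limit is always of the form 2^n - 1
 * here, so "bics xzr, ptr, limit" sets Z exactly when ptr has no bits set
 * above the limit, i.e. ptr <= limit. With a made-up limit of
 * 0x0000ffffffffffff, a kernel pointer such as 0xffff000012345678 leaves
 * high bits after the BIC, Z stays clear, and csel yields NULL; the
 * csdb() keeps a mispredicted branch from consuming the unsanitised
 * pointer under speculation.
 */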

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __raw_get_user(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, (err));			\
	} else {							\
		(x) = 0; (err) = -EFAULT;				\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user
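
/*
 * Illustrative use (hypothetical caller; 'uptr' and 'val' are made-up
 * names): get_user() returns 0 on success, -EFAULT on error.
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */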

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __raw_put_user(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user
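
/*
 * Illustrative use, mirroring the get_user() example above (made-up
 * names):
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */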

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));	\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));	\
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n)					\
({									\
	__arch_copy_in_user(__uaccess_mask_ptr(to),			\
			    __uaccess_mask_ptr(from), (n));		\
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
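
/*
 * Note (added for clarity): the raw_copy_* hooks and INLINE_COPY_*
 * defines above feed the generic copy_from_user()/copy_to_user() in
 * <linux/uaccess.h>. Illustrative caller (made-up struct name):
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, uptr, sizeof(args)))
 *		return -EFAULT;
 */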

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
	return n;
}
#define clear_user	__clear_user

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#endif /* __ASM_UACCESS_H */