#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>

#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes
 * and return the amount left to copy.  It should assume that access_ok()
 * has already been checked (and succeeded); it should *not* zero the tail
 * of the buffer on error.  No KASAN or object size checks either - those
 * belong in the wrappers below.
 */

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

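/*
 * Illustrative sketch (not part of this header): the double-underscore
 * variants above skip the access_ok() check, so a caller is expected to
 * have validated the range itself, e.g.:
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 *
 * When in doubt, use copy_from_user() below, which performs the check.
 */
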
/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(from, n))) {
		kasan_check_write(to, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n) && access_ok(from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif

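/*
 * Illustrative sketch (not part of this header): typical use of the
 * checked copy routines from a syscall or ioctl handler.  Both return
 * the number of bytes that could NOT be copied, so any non-zero result
 * is usually turned into -EFAULT ("struct foo", "karg", "uarg" and
 * FOO_SEEN are hypothetical):
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	karg.flags |= FOO_SEEN;
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */
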
static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler.  If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * Make sure the increment is visible before a page fault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * Make sure any pending loads/stores are issued before
	 * enabling the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}

/*
 * Is the pagefault handler disabled?  If so, user access methods will not
 * sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This macro should only be used by the fault handlers.  Other users
 * should stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler: with
 * !CONFIG_PREEMPT_COUNT it is a NOP, so the fault handler won't actually
 * be disabled.  Example: preempt_disable() + kmap_atomic().
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
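
/*
 * Illustrative sketch (not part of this header): pagefault_disable()
 * lets a caller attempt a user access from a context that must not
 * sleep; a fault is then reported as a short copy instead of being
 * handled, and the caller can fall back to a sleeping path:
 *
 *	unsigned long left;
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(dst, usrc, len);
 *	pagefault_enable();
 *	if (left)
 *		return -EFAULT;
 */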

#ifndef ARCH_HAS_NOCACHE_UACCESS

/*
 * Without a nocache primitive from the architecture, fall back to the
 * regular, cache-allocating copy.
 */
static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */

/*
 * check_zeroed_user(): check whether a userspace buffer contains only
 * zero bytes.  Returns 1 if the buffer is entirely zeroed, 0 if it is
 * not, and -EFAULT if the user access failed.
 */
extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space.  This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then userspace has passed an old struct to a newer
 *    kernel.  The trailing bytes in @dst (@ksize - @usize) are zero-filled.
 *  * If @usize > @ksize, then userspace has passed a new struct to an older
 *    kernel.  The trailing bytes unknown to the kernel (@usize - @ksize) are
 *    checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}
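
/*
 * Illustrative sketch (not part of this header): the intended calling
 * convention for extensible-struct syscalls, where userspace passes the
 * size of its view of the struct ("foobar", "struct foo" and
 * FOO_SIZE_VER0 are hypothetical):
 *
 *	SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg,
 *			size_t, usize)
 *	{
 *		struct foo karg = {};
 *		int err;
 *
 *		if (usize > PAGE_SIZE)
 *			return -E2BIG;
 *		if (usize < FOO_SIZE_VER0)
 *			return -EINVAL;
 *
 *		err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *		if (err)
 *			return err;
 *		...
 *	}
 */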

/*
 * probe_kernel_read(): safely attempt to read from a kernel location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long probe_kernel_read_strict(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
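
/*
 * Illustrative sketch (not part of this header): probe_kernel_read() is
 * the safe way to read a kernel address that may be unmapped (e.g. from
 * a tracer or debugger path), instead of dereferencing it directly:
 *
 *	unsigned long val;
 *
 *	if (probe_kernel_read(&val, addr, sizeof(val)))
 *		return -EFAULT;
 */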

/*
 * probe_user_read(): safely attempt to read from a user-space location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from (must be a user-space address)
 * @size: size of the data chunk
 *
 * Safely read from user address @src to the buffer at @dst.  If a fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_user_read(void *dst, const void __user *src, size_t size);
extern long __probe_user_read(void *dst, const void __user *src, size_t size);

/*
 * probe_kernel_write(): safely attempt to write to a kernel location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);

/*
 * probe_user_write(): safely attempt to write to a user-space location
 * @dst: address to write to (must be a user-space address)
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to user address @dst from the buffer at @src.  If a fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);

extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
				       long count);
extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
				     long count);
extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval)		\
	probe_kernel_read(&retval, addr, sizeof(retval))
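
/*
 * Illustrative sketch (not part of this header): probe_kernel_address()
 * wraps probe_kernel_read() for a single load, sizing the access from
 * @retval ("ip" and "insn" are hypothetical):
 *
 *	unsigned short insn;
 *
 *	if (probe_kernel_address(ip, insn))
 *		return -EFAULT;
 */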

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
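
/*
 * Illustrative sketch (not part of this header): a user_access_begin()
 * section lets a caller batch several unsafe_*_user() accesses under a
 * single access_ok() check; every access must branch to a label that
 * runs user_access_end():
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_get_user(a, &uptr[0], efault);
 *	unsafe_get_user(b, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */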

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif		/* __LINUX_UACCESS_H__ */