1
2#ifndef __LINUX_UACCESS_H__
3#define __LINUX_UACCESS_H__
4
5#include <linux/fault-inject-usercopy.h>
6#include <linux/instrumented.h>
7#include <linux/minmax.h>
8#include <linux/sched.h>
9#include <linux/thread_info.h>
10
11#include <asm/uaccess.h>
12
13#ifdef CONFIG_SET_FS
14
15
16
17
18
19static inline mm_segment_t force_uaccess_begin(void)
20{
21 mm_segment_t fs = get_fs();
22
23 set_fs(USER_DS);
24 return fs;
25}
26
/* Restore the address-space limit previously saved by force_uaccess_begin(). */
static inline void force_uaccess_end(mm_segment_t oldfs)
{
	set_fs(oldfs);
}
31#else
/*
 * On architectures that do not use set_fs() there is no segment state to
 * carry around: mm_segment_t is an empty placeholder type, the kernel is
 * never in "kernel uaccess" mode, and the user address limit is simply
 * TASK_SIZE_MAX.
 */
typedef struct {
	/* empty dummy */
} mm_segment_t;

#ifndef TASK_SIZE_MAX
#define TASK_SIZE_MAX TASK_SIZE
#endif

#define uaccess_kernel() (false)
#define user_addr_max() (TASK_SIZE_MAX)
42
/* No set_fs() state to save: return the empty placeholder segment. */
static inline mm_segment_t force_uaccess_begin(void)
{
	return (mm_segment_t) { };
}
47
/* Nothing to restore when the architecture does not use set_fs(). */
static inline void force_uaccess_end(mm_segment_t oldfs)
{
}
51#endif
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
/*
 * Unchecked copy from user space for atomic context: no might_fault() and
 * no access_ok() here, so the caller is responsible for having validated
 * @from.  Instrumentation and object-size checking are issued before the
 * raw copy so sanitizers/hardening observe the access.  Returns the number
 * of bytes left uncopied (0 on full success), per the raw_copy_from_user()
 * contract.
 */
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}
105
/*
 * Copy from user space without an access_ok() check; may fault and hence
 * sleep (might_fault()).  Honours usercopy fault injection: when
 * should_fail_usercopy() triggers, the whole copy is reported as failed by
 * returning all @n bytes as uncopied.  Returns the number of bytes not
 * copied (0 on success).
 */
static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}
116
117
118
119
120
121
122
123
124
125
126
127
128
129
/*
 * Unchecked copy to user space for atomic context: no might_fault() and no
 * access_ok(), so the caller must have validated @to.  Supports usercopy
 * fault injection (reports all @n bytes uncopied when it triggers).
 * Returns the number of bytes not copied (0 on success).
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
139
/*
 * Copy to user space without an access_ok() check; may fault and hence
 * sleep (might_fault()).  Supports usercopy fault injection.  Returns the
 * number of bytes not copied (0 on success).
 */
static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
150
151#ifdef INLINE_COPY_FROM_USER
/*
 * Checked copy from user space: validates @from with access_ok() and
 * supports usercopy fault injection.  Any tail of @to that could not be
 * copied (bad address, faulting copy, or injected failure) is zero-filled
 * so callers never see uninitialized kernel memory.  Returns the number of
 * bytes not copied (0 on success).
 */
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user(to, from, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
165#else
166extern __must_check unsigned long
167_copy_from_user(void *, const void __user *, unsigned long);
168#endif
169
170#ifdef INLINE_COPY_TO_USER
/*
 * Checked copy to user space: validates @to with access_ok() and supports
 * usercopy fault injection.  An invalid destination leaves @n untouched so
 * the whole copy is reported as failed.  Returns the number of bytes not
 * copied (0 on success).
 */
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
183#else
184extern __must_check unsigned long
185_copy_to_user(void __user *, const void *, unsigned long);
186#endif
187
188static __always_inline unsigned long __must_check
189copy_from_user(void *to, const void __user *from, unsigned long n)
190{
191 if (likely(check_copy_size(to, n, false)))
192 n = _copy_from_user(to, from, n);
193 return n;
194}
195
196static __always_inline unsigned long __must_check
197copy_to_user(void __user *to, const void *from, unsigned long n)
198{
199 if (likely(check_copy_size(from, n, true)))
200 n = _copy_to_user(to, from, n);
201 return n;
202}
203#ifdef CONFIG_COMPAT
/*
 * Copy between two user-space buffers (compat layer only).  Both pointers
 * are validated with access_ok(); if either fails, all @n bytes are
 * reported as uncopied.  May fault and sleep.  Returns the number of bytes
 * not copied (0 on success).
 */
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n) && access_ok(from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
212#endif
213
214#ifndef copy_mc_to_kernel
215
216
217
218
/*
 * Generic fallback for architectures that do not provide their own
 * copy_mc_to_kernel(): a plain memcpy() that always reports 0 bytes left
 * uncopied.  NOTE(review): without arch opt-in this cannot recover from
 * machine-check ("mc") faults during the copy — that is presumably why
 * archs override it.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
225#endif
226
/* Raise the current task's pagefault-disable nesting count. */
static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}
231
/* Drop the current task's pagefault-disable nesting count. */
static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}
236
237
238
239
240
241
242
243
/*
 * Disable pagefault handling for the current task (nestable via the
 * per-task counter read by pagefault_disabled()).
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * Compiler barrier: ensure the counter increment is issued before
	 * any subsequent code that may fault.
	 */
	barrier();
}
253
/*
 * Re-enable pagefault handling for the current task, undoing one level of
 * pagefault_disable() nesting.
 */
static inline void pagefault_enable(void)
{
	/*
	 * Compiler barrier: ensure all preceding potentially-faulting code
	 * is issued before the counter is decremented.
	 */
	barrier();
	pagefault_disabled_dec();
}
263
264
265
266
267static inline bool pagefault_disabled(void)
268{
269 return current->pagefault_disabled != 0;
270}
271
272
273
274
275
276
277
278
279
280
281
282#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
283
284#ifndef ARCH_HAS_NOCACHE_UACCESS
285
/*
 * Fallback for architectures without ARCH_HAS_NOCACHE_UACCESS: the
 * "nocache" variant simply degrades to the ordinary (cached) inatomic
 * copy.  Returns the number of bytes not copied (0 on success).
 */
static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}
292
293#endif
294
295extern __must_check int check_zeroed_user(const void __user *from, size_t size);
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace with forward/backward compatibility for
 * extensible syscall structs: if @usize < @ksize the trailing bytes of
 * @dst are zero-filled; if @usize > @ksize the trailing user bytes must
 * all be zero (checked with check_zeroed_user()), otherwise -E2BIG is
 * returned.
 *
 * Returns: 0 on success, -EFAULT if the user copy faults, -E2BIG if
 * trailing user bytes are non-zero, or the negative error propagated from
 * check_zeroed_user().
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}

	/* Copy the interesting part. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}
364
365bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);
366
367long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
368long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);
369
370long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
371long notrace copy_to_user_nofault(void __user *dst, const void *src,
372 size_t size);
373
374long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
375 long count);
376
377long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
378 long count);
379long strnlen_user_nofault(const void __user *unsafe_addr, long count);
380
381
382
383
384
385
386
387
/*
 * Read a kernel variable through @ptr into @val without risking an oops:
 * the access goes through copy_from_kernel_nofault(), whose result is the
 * macro's value.  @ptr is assigned to a typed local so it is evaluated
 * exactly once and type-checked against @val.
 */
#define get_kernel_nofault(val, ptr) ({ \
	const typeof(val) *__gk_ptr = (ptr); \
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
392
/*
 * Generic fallbacks for architectures without dedicated user_access_begin /
 * user_access_end support: "begin" degrades to a plain access_ok() check,
 * "end" is a no-op, and the unsafe_*() accessors wrap the ordinary
 * __get_user()/__put_user()/__copy_*_user() helpers, jumping to the error
 * label on failure.
 */
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
/* No architecture-specific state to save or restore in the generic case. */
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
404#ifndef user_write_access_begin
405#define user_write_access_begin user_access_begin
406#define user_write_access_end user_access_end
407#endif
408#ifndef user_read_access_begin
409#define user_read_access_begin user_access_begin
410#define user_read_access_end user_access_end
411#endif
412
413#ifdef CONFIG_HARDENED_USERCOPY
414void usercopy_warn(const char *name, const char *detail, bool to_user,
415 unsigned long offset, unsigned long len);
416void __noreturn usercopy_abort(const char *name, const char *detail,
417 bool to_user, unsigned long offset,
418 unsigned long len);
419#endif
420
421#endif
422