1
2#ifndef __LINUX_UACCESS_H__
3#define __LINUX_UACCESS_H__
4
5#include <linux/fault-inject-usercopy.h>
6#include <linux/instrumented.h>
7#include <linux/minmax.h>
8#include <linux/sched.h>
9#include <linux/thread_info.h>
10
11#include <asm/uaccess.h>
12
13#ifdef CONFIG_SET_FS
14
15
16
17
18
19static inline mm_segment_t force_uaccess_begin(void)
20{
21 mm_segment_t fs = get_fs();
22
23 set_fs(USER_DS);
24 return fs;
25}
26
/*
 * Restore the address limit previously saved by a matching
 * force_uaccess_begin().
 */
static inline void force_uaccess_end(mm_segment_t oldfs)
{
	set_fs(oldfs);
}
31#else
/*
 * Without CONFIG_SET_FS there is no per-task address limit to save and
 * restore, so mm_segment_t is an empty placeholder type.
 */
typedef struct {
	/* empty dummy */
} mm_segment_t;

#ifndef TASK_SIZE_MAX
#define TASK_SIZE_MAX			TASK_SIZE
#endif

/* Kernel-mode uaccess can never be active in this configuration. */
#define uaccess_kernel()		(false)
#define user_addr_max()			(TASK_SIZE_MAX)
42
43static inline mm_segment_t force_uaccess_begin(void)
44{
45 return (mm_segment_t) { };
46}
47
/* No-op counterpart to the stub force_uaccess_begin(). */
static inline void force_uaccess_end(mm_segment_t oldfs)
{
}
51#endif
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
/*
 * Copy data from userspace without sleeping (no might_fault()), for use
 * in atomic context.  Unlike copy_from_user(), no access_ok() check is
 * performed here — the caller is responsible for that.
 *
 * Returns the result of raw_copy_from_user() (conventionally the number
 * of bytes left uncopied; 0 on full success).
 */
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/* Notify sanitizer instrumentation about the user-memory read. */
	instrument_copy_from_user(to, from, n);
	/* Hardened-usercopy check on the kernel destination object. */
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}
105
/*
 * Copy data from userspace; may sleep on a page fault (might_fault()).
 * No access_ok() check is performed — callers must validate the range
 * themselves (this is the "__" variant).
 *
 * Returns the result of raw_copy_from_user() (conventionally the number
 * of bytes left uncopied), or @n when fault injection forces a failure.
 */
static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	/* Optional fault injection: pretend nothing was copied. */
	if (should_fail_usercopy())
		return n;
	instrument_copy_from_user(to, from, n);
	/* Hardened-usercopy check on the kernel destination object. */
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}
116
117
118
119
120
121
122
123
124
125
126
127
128
129
/*
 * Copy data to userspace without sleeping (no might_fault()), for use in
 * atomic context.  No access_ok() check is performed — the caller is
 * responsible for that.
 *
 * Returns the result of raw_copy_to_user() (conventionally the number of
 * bytes left uncopied), or @n when fault injection forces a failure.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	/* Optional fault injection: pretend nothing was copied. */
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	/* Hardened-usercopy check on the kernel source object. */
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
139
/*
 * Copy data to userspace; may sleep on a page fault (might_fault()).
 * No access_ok() check is performed — callers must validate the range
 * themselves (this is the "__" variant).
 *
 * Returns the result of raw_copy_to_user() (conventionally the number of
 * bytes left uncopied), or @n when fault injection forces a failure.
 */
static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	/* Optional fault injection: pretend nothing was copied. */
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	/* Hardened-usercopy check on the kernel source object. */
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
150
#ifdef INLINE_COPY_FROM_USER
/*
 * Checked copy from userspace.  The copy is only attempted when fault
 * injection does not trigger and access_ok() accepts the range.  On any
 * failure — including a partial fault during the raw copy — the
 * not-copied tail of @to is zeroed so callers never observe stale
 * kernel memory contents.
 *
 * Returns the number of bytes that could not be copied (0 on success).
 */
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user(to, from, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);	/* zero the uncopied tail */
	return res;
}
#else
/* Out-of-line variant, defined in lib/usercopy or arch code. */
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif
169
#ifdef INLINE_COPY_TO_USER
/*
 * Checked copy to userspace.  The copy is only attempted when fault
 * injection does not trigger and access_ok() accepts the range (no
 * zeroing is needed on failure since the destination is user memory).
 *
 * Returns the number of bytes that could not be copied (0 on success).
 */
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	/* Optional fault injection: pretend nothing was copied. */
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
/* Out-of-line variant, defined in lib/usercopy or arch code. */
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
187
/*
 * Main entry point for copying data from userspace: validates the size
 * against the destination object (check_copy_size) before delegating to
 * _copy_from_user().
 *
 * Returns the number of bytes that could not be copied; @n is returned
 * unchanged when the size check fails.
 */
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}
195
/*
 * Main entry point for copying data to userspace: validates the size
 * against the source object (check_copy_size) before delegating to
 * _copy_to_user().
 *
 * Returns the number of bytes that could not be copied; @n is returned
 * unchanged when the size check fails.
 */
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
203
#ifndef copy_mc_to_kernel
/*
 * Fallback for architectures that do not provide a machine-check-aware
 * kernel-to-kernel copy: a plain memcpy() that always reports full
 * success (0 bytes left uncopied).
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif
216
217static __always_inline void pagefault_disabled_inc(void)
218{
219 current->pagefault_disabled++;
220}
221
222static __always_inline void pagefault_disabled_dec(void)
223{
224 current->pagefault_disabled--;
225}
226
227
228
229
230
231
232
233
/*
 * Disable pagefault handling for the current task (nestable via the
 * per-task counter).
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * Compiler barrier: make sure the counter increment is not
	 * reordered after any subsequent user access.
	 */
	barrier();
}
243
/*
 * Re-enable pagefault handling for the current task; pairs with
 * pagefault_disable().
 */
static inline void pagefault_enable(void)
{
	/*
	 * Compiler barrier: make sure no preceding user access is
	 * reordered after the counter decrement.
	 */
	barrier();
	pagefault_disabled_dec();
}
253
254
255
256
257static inline bool pagefault_disabled(void)
258{
259 return current->pagefault_disabled != 0;
260}
261
/*
 * Is fault handling disallowed right now?  True when the task disabled
 * it explicitly (pagefault_disabled()) or when running in atomic
 * context, where taking a fault would be unsafe.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
273
#ifndef ARCH_HAS_NOCACHE_UACCESS

/*
 * Fallback for architectures without a cache-bypassing user copy:
 * simply forward to the ordinary atomic-context copy.
 */
static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif
284
285extern __must_check int check_zeroed_user(const void __user *from, size_t size);
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination buffer, in kernel space; must be @ksize bytes long.
 * @ksize: Size of the kernel struct.
 * @usize: (Alleged) size of the user struct at @src.
 * @src:   Source address, in userspace.
 *
 * Copies min(@ksize, @usize) bytes from @src to @dst.  If the user
 * struct is smaller, the trailing kernel bytes are zeroed; if it is
 * larger, the trailing user bytes must all be zero (checked with
 * check_zeroed_user()), otherwise the struct is considered from a
 * newer ABI and rejected.
 *
 * Returns:
 *  * -E2BIG:  @usize > @ksize and the trailing user bytes are not zero.
 *  * -EFAULT: the userspace access failed.
 *  * 0:       success.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;	/* bytes only one side has */

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		/* Older/smaller user struct: zero the kernel tail. */
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		/* Newer/larger user struct: the tail must be all zeroes. */
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}

	/* Copy the interesting part. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}
354
355bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);
356
357long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
358long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);
359
360long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
361long notrace copy_to_user_nofault(void __user *dst, const void *src,
362 size_t size);
363
364long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
365 long count);
366
367long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
368 long count);
369long strnlen_user_nofault(const void __user *unsafe_addr, long count);
370
/*
 * get_kernel_nofault(): safely attempt to read from a kernel location
 * @val: variable to read into
 * @ptr: address to read from
 *
 * Evaluates to the result of copy_from_kernel_nofault() for
 * sizeof(@val) bytes; the pointer is first assigned to a typed
 * temporary so a type mismatch between @val and @ptr warns.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
382
/*
 * Fallbacks for architectures that do not provide their own
 * user_access_begin()/user_access_end() block primitives: "begin"
 * degrades to a plain access_ok() check, "end" is a no-op, and the
 * unsafe_*() accessors wrap the ordinary __get_user()/__put_user()/
 * __copy_*_user() helpers, jumping to the caller's error label on
 * failure.
 */
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
/* No arch hook: nothing to save or restore across a user access. */
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
/* Default: no separate read/write access windows — use the common one. */
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
402
403#ifdef CONFIG_HARDENED_USERCOPY
404void usercopy_warn(const char *name, const char *detail, bool to_user,
405 unsigned long offset, unsigned long len);
406void __noreturn usercopy_abort(const char *name, const char *detail,
407 bool to_user, unsigned long offset,
408 unsigned long len);
409#endif
410
411#endif
412