/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

/*
 * Architectures provide raw_copy_{to,from}_user(); the wrappers below
 * layer usercopy fault injection, KASAN/KCSAN instrumentation and
 * hardened-usercopy object checks on top of that primitive.
 *
 * raw_copy_{to,from}_user(to, from, size) copies up to @size bytes and
 * returns the number of bytes left uncopied.  It may assume that
 * access_ok() has already been checked (and succeeded), and it must not
 * zero the tail of the destination on a short copy; the zeroing that
 * copy_from_user() guarantees is done in the callers below.
 */
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic() - copy data to user space, with less checking
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * The caller must have checked the destination range with access_ok()
 * and must be able to handle a page fault without sleeping.
 *
 * Return: number of bytes that could not be copied; zero on success.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user(to, from, n);
		res = raw_copy_from_user(to, from, n);
	}
	/* Zero the uncopied tail, as the copy_from_user() API promises. */
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
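
/*
 * Illustrative sketch (not part of this header): a typical caller treats
 * any nonzero return as a fault.  "struct foo", "karg" and "uarg" are
 * hypothetical names:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */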

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in, the generic copy_mc_to_kernel() cannot survive a
 * machine-check (or architecturally equivalent) exception on the source
 * read; it degrades to a plain memcpy() and reports full success.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif
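
/*
 * Illustrative sketch: callers such as persistent-memory drivers use
 * copy_mc_to_kernel() when the source may contain poisoned memory; a
 * nonzero return is the number of bytes left uncopied.  "buf", "src"
 * and "len" are hypothetical:
 *
 *	unsigned long rem = copy_mc_to_kernel(buf, src, len);
 *	if (rem)
 *		return -EIO;
 */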

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler.  If disabled, it
 * will not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a
 * pagefault_disabled() environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * Make sure the increment is visible before a page fault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * Make sure any pending loads/stores are issued before the
	 * pagefault handler is re-enabled.
	 */
	barrier();
	pagefault_disabled_dec();
}
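
/*
 * Illustrative sketch: code that must not sleep disables page faults
 * around an atomic user access and falls back to a sleeping path on
 * failure.  All names below are hypothetical:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, uaddr, len);
 *	pagefault_enable();
 *	if (ret)
 *		ret = slow_path_copy(dst, uaddr, len);
 */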

/*
 * Is the pagefault handler disabled?  If so, user access methods will not
 * sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This macro should only be used by the fault handlers themselves; other
 * users should stick to pagefault_disabled().  Never use preempt_disable()
 * to disable the fault handler: with !CONFIG_PREEMPT_COUNT it is a no-op,
 * so the handler would not actually be disabled.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/**
 * probe_subpage_writeable() - probe a user range for write faults at
 *			       sub-page granularity (e.g. arm64 MTE)
 * @uaddr: start of the user address range
 * @size:  size of the address range
 *
 * Architectures that can fault at sub-page granularity provide a real
 * implementation; this stub assumes whole-page probing is sufficient.
 *
 * Return: 0 on success, the number of bytes not probed on fault.
 */
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
	return 0;
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */

#ifndef ARCH_HAS_NOCACHE_UACCESS
/*
 * Fallback for architectures without a cache-bypassing user copy: the
 * nocache hint is simply ignored.
 */
static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif /* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user() - copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace in a way that preserves backwards
 * compatibility for syscall arguments: extensions must only *append*
 * fields to the struct, and all-zero new fields must mean the same as
 * the old struct.
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, @src is copied verbatim.
 *  * If @usize < @ksize, userspace passed an old struct to a newer
 *    kernel; the trailing (@ksize - @usize) bytes of @dst are zeroed.
 *  * If @usize > @ksize, userspace passed a new struct to an older
 *    kernel; the trailing (@usize - @ksize) bytes of @src must be zero,
 *    otherwise -E2BIG is returned.
 *
 * Return: 0 on success; -E2BIG if @usize > @ksize with non-zero trailing
 * bytes; -EFAULT if the userspace access failed.  Some data may have
 * been copied in any case.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the part of @src the kernel knows about. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}
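
/*
 * Illustrative usage for an extensible syscall; "struct foo" and
 * FOO_SIZE_VER0 (the size of the first published struct version) are
 * hypothetical:
 *
 *	SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg,
 *			size_t, usize)
 *	{
 *		struct foo karg = {};
 *		int err;
 *
 *		if (usize > PAGE_SIZE)
 *			return -E2BIG;
 *		if (usize < FOO_SIZE_VER0)
 *			return -EINVAL;
 *
 *		err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *		if (err)
 *			return err;
 *		...
 *	}
 */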

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

#ifndef __get_kernel_nofault
#define __get_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(src);	\
	type data;					\
	if (__get_user(data, p))			\
		goto label;				\
	*(type *)dst = data;				\
} while (0)

#define __put_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(dst);	\
	type data = *(type *)src;			\
	if (__put_user(data, p))			\
		goto label;				\
} while (0)
#endif

/**
 * get_kernel_nofault() - safely attempt to read from a location
 * @val: variable to store the value in
 * @ptr: address to read from
 *
 * Return: 0 on success, or -EFAULT if the read faulted.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
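
/*
 * Illustrative sketch: probing a possibly-unmapped kernel address from a
 * debugging facility without oopsing.  "addr" is hypothetical:
 *
 *	unsigned long word;
 *
 *	if (get_kernel_nofault(word, (unsigned long *)addr))
 *		return -EFAULT;
 */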

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
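
/*
 * Illustrative sketch of the unsafe-access pattern: batch several user
 * accesses inside one user_access_begin()/user_access_end() section and
 * bail out through a label on fault.  "uptr", "a" and "b" are
 * hypothetical:
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(a, &uptr->a, efault);
 *	unsafe_get_user(b, &uptr->b, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */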

#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif		/* __LINUX_UACCESS_H__ */