1#ifndef __LINUX_UACCESS_H__
2#define __LINUX_UACCESS_H__
3
4#include <linux/sched.h>
5#include <linux/thread_info.h>
6#include <linux/kasan-checks.h>
7
/* Legacy first argument to access_ok(); most architectures ignore it. */
#define VERIFY_READ 0
#define VERIFY_WRITE 1

/* True when the address limit is KERNEL_DS, i.e. uaccess helpers may be
 * pointed at kernel addresses (set_fs(KERNEL_DS) is in effect). */
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
12
13#include <asm/uaccess.h>
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
/*
 * __copy_from_user_inatomic() - copy from user space, atomic-context variant.
 * @to:   destination kernel buffer
 * @from: source user-space address (caller must have done access_ok())
 * @n:    number of bytes to copy
 *
 * Like __copy_from_user() but without might_fault(): callers run with
 * page faults disabled and must tolerate a short copy.  Returns the
 * number of bytes NOT copied (0 on full success).
 */
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/* Tell KASAN the kernel destination is about to be written. */
	kasan_check_write(to, n);
	/* Hardened usercopy: reject copies that overflow the object at @to. */
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}
67
/*
 * __copy_from_user() - copy from user space without access_ok() checking.
 * @to:   destination kernel buffer
 * @from: source user-space address (caller is responsible for access_ok())
 * @n:    number of bytes to copy
 *
 * Returns the number of bytes NOT copied (0 on full success).
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	/* May sleep / take a page fault; must come before the copy. */
	might_fault();
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}
76
77
78
79
80
81
82
83
84
85
86
87
88
89
/*
 * __copy_to_user_inatomic() - copy to user space, atomic-context variant.
 * @to:   destination user-space address (caller must have done access_ok())
 * @from: source kernel buffer
 * @n:    number of bytes to copy
 *
 * No might_fault(): intended for callers that have page faults disabled
 * and can handle a partial copy.  Returns the number of bytes NOT copied.
 */
static __always_inline unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	/* Tell KASAN the kernel source is about to be read. */
	kasan_check_read(from, n);
	/* Hardened usercopy: reject copies that overread the object at @from. */
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
97
/*
 * __copy_to_user() - copy to user space without access_ok() checking.
 * @to:   destination user-space address (caller is responsible for access_ok())
 * @from: source kernel buffer
 * @n:    number of bytes to copy
 *
 * Returns the number of bytes NOT copied (0 on full success).
 */
static __always_inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/* May sleep / take a page fault; must come before the copy. */
	might_fault();
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
106
/*
 * _copy_from_user() - backend for copy_from_user(), inline or out-of-line
 * depending on whether the architecture defines INLINE_COPY_FROM_USER.
 *
 * Returns the number of bytes NOT copied.  On any failure (bad range or
 * partial fault) the uncopied tail of the kernel buffer is zeroed so
 * callers never see stale kernel stack/heap contents.
 */
#ifdef INLINE_COPY_FROM_USER
static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n))) {
		kasan_check_write(to, n);
		res = raw_copy_from_user(to, from, n);
	}
	/* Zero whatever was not copied, including the whole buffer when
	 * access_ok() failed (res is still n in that case). */
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif
125
126#ifdef INLINE_COPY_TO_USER
127static inline unsigned long
128_copy_to_user(void __user *to, const void *from, unsigned long n)
129{
130 might_fault();
131 if (access_ok(VERIFY_WRITE, to, n)) {
132 kasan_check_read(from, n);
133 n = raw_copy_to_user(to, from, n);
134 }
135 return n;
136}
137#else
138extern unsigned long
139_copy_to_user(void __user *, const void *, unsigned long);
140#endif
141
142static __always_inline unsigned long __must_check
143copy_from_user(void *to, const void __user *from, unsigned long n)
144{
145 if (likely(check_copy_size(to, n, false)))
146 n = _copy_from_user(to, from, n);
147 return n;
148}
149
150static __always_inline unsigned long __must_check
151copy_to_user(void __user *to, const void *from, unsigned long n)
152{
153 if (likely(check_copy_size(from, n, true)))
154 n = _copy_to_user(to, from, n);
155 return n;
156}
#ifdef CONFIG_COMPAT
/*
 * copy_in_user() - copy between two user-space buffers (compat syscalls).
 * @to:   destination user-space address
 * @from: source user-space address (note: __user despite the plain type)
 * @n:    number of bytes to copy
 *
 * Both ranges are validated with access_ok(); if either fails, nothing is
 * copied and @n is returned.  Returns the number of bytes NOT copied.
 */
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif
167
/* Bump the per-task pagefault-disable nesting counter. */
static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}
172
/* Drop one level of the per-task pagefault-disable nesting counter. */
static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}
177
178
179
180
181
182
183
184
/*
 * pagefault_disable() - disable the pagefault handler for the current task.
 * Nests: each call must be paired with pagefault_enable().  While disabled,
 * user access methods will not sleep and will fail instead of faulting in
 * pages.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * Compiler barrier: make sure the counter store is issued before
	 * any code that could take a page fault.
	 */
	barrier();
}
194
/*
 * pagefault_enable() - undo one pagefault_disable() nesting level.
 */
static inline void pagefault_enable(void)
{
	/*
	 * Compiler barrier: make sure preceding loads/stores are issued
	 * before the pagefault handler is re-enabled.
	 */
	barrier();
	pagefault_disabled_dec();
}
204
/*
 * Is the pagefault handler disabled for the current task?  If so, user
 * access methods will not sleep.
 */
#define pagefault_disabled() (current->pagefault_disabled != 0)
209
/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in atomic context (in_atomic()).
 *
 * Intended for the fault handlers themselves; other users should stick to
 * pagefault_disabled().  Note that preempt_disable() is NOT a substitute:
 * with !CONFIG_PREEMPT_COUNT it does not affect in_atomic(), so it does
 * not reliably disable the fault handler.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
221
#ifndef ARCH_HAS_NOCACHE_UACCESS

/*
 * Generic fallback for architectures without a cache-bypassing user copy:
 * simply use the ordinary atomic-context copy.  Arches that can avoid
 * polluting the cache define ARCH_HAS_NOCACHE_UACCESS and provide their own.
 */
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif
231
/*
 * probe_kernel_read(): safely attempt to read from a kernel location
 * @dst:  buffer that receives the data
 * @src:  kernel address to read from
 * @size: number of bytes to read
 *
 * Reads @size bytes from @src into @dst, surviving a kernel fault on the
 * source (expected to return 0 on success, -EFAULT on fault — implemented
 * elsewhere, see mm/maccess.c; confirm against that definition).  The
 * double-underscore variant skips the outer fault-disable bookkeeping.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);

/*
 * probe_kernel_write(): safely attempt to write to a kernel location
 * @dst:  kernel address to write to
 * @src:  buffer holding the data
 * @size: number of bytes to write
 *
 * Counterpart of probe_kernel_read() for stores; notrace because it is
 * used by tracing/kprobes code that must not recurse into the tracer.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);

/*
 * Copy a NUL-terminated string from an unsafe (possibly faulting) kernel
 * address into @dst, copying at most @count bytes.  See mm/maccess.c for
 * the exact return-value contract.
 */
extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
257
/*
 * probe_kernel_address() - safely read a simple variable from kernel memory.
 * @addr:   address to read from
 * @retval: lvalue that receives the value read
 *
 * Convenience wrapper around probe_kernel_read() sized by typeof(retval);
 * returns whatever probe_kernel_read() returns.
 */
#define probe_kernel_address(addr, retval) \
	probe_kernel_read(&retval, addr, sizeof(retval))
267
/*
 * Fallbacks for architectures that do not provide user_access_begin()/
 * user_access_end() (e.g. no SMAP-style access gating): begin/end are
 * no-ops, and the unsafe_{get,put}_user() helpers degrade to the ordinary
 * checked accessors, jumping to @err on failure.
 */
#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
274
275#endif
276