1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/mm.h>
18#include <linux/slab.h>
19#include <linux/sched.h>
20#include <linux/sched/task.h>
21#include <linux/sched/task_stack.h>
22#include <linux/thread_info.h>
23#include <asm/sections.h>
24
25
26
27
28
29
30
31
32
33
34
35static noinline int check_stack_object(const void *obj, unsigned long len)
36{
37 const void * const stack = task_stack_page(current);
38 const void * const stackend = stack + THREAD_SIZE;
39 int ret;
40
41
42 if (obj + len <= stack || stackend <= obj)
43 return NOT_STACK;
44
45
46
47
48
49
50 if (obj < stack || stackend < obj + len)
51 return BAD_STACK;
52
53
54 ret = arch_within_stack_frames(stack, stackend, obj, len);
55 if (ret)
56 return ret;
57
58 return GOOD_STACK;
59}
60
61
62
63
64
65
66
67
68
69
70
71
72
73void usercopy_warn(const char *name, const char *detail, bool to_user,
74 unsigned long offset, unsigned long len)
75{
76 WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
77 to_user ? "exposure" : "overwrite",
78 to_user ? "from" : "to",
79 name ? : "unknown?!",
80 detail ? " '" : "", detail ? : "", detail ? "'" : "",
81 offset, len);
82}
83
84void __noreturn usercopy_abort(const char *name, const char *detail,
85 bool to_user, unsigned long offset,
86 unsigned long len)
87{
88 pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
89 to_user ? "exposure" : "overwrite",
90 to_user ? "from" : "to",
91 name ? : "unknown?!",
92 detail ? " '" : "", detail ? : "", detail ? "'" : "",
93 offset, len);
94
95
96
97
98
99
100 BUG();
101}
102
103
/*
 * Does the half-open range [ptr, ptr + n) intersect the half-open
 * range [low, high)?
 */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long start = ptr;
	const unsigned long end = start + n;

	/* Non-empty intersection: each range starts before the other ends. */
	return start < high && end > low;
}
116
117
118static inline void check_kernel_text_object(const unsigned long ptr,
119 unsigned long n, bool to_user)
120{
121 unsigned long textlow = (unsigned long)_stext;
122 unsigned long texthigh = (unsigned long)_etext;
123 unsigned long textlow_linear, texthigh_linear;
124
125 if (overlaps(ptr, n, textlow, texthigh))
126 usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);
127
128
129
130
131
132
133
134
135
136 textlow_linear = (unsigned long)lm_alias(textlow);
137
138 if (textlow_linear == textlow)
139 return;
140
141
142 texthigh_linear = (unsigned long)lm_alias(texthigh);
143 if (overlaps(ptr, n, textlow_linear, texthigh_linear))
144 usercopy_abort("linear kernel text", NULL, to_user,
145 ptr - textlow_linear, n);
146}
147
148static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
149 bool to_user)
150{
151
152 if (ptr + n < ptr)
153 usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
154
155
156 if (ZERO_OR_NULL_PTR(ptr))
157 usercopy_abort("null address", NULL, to_user, ptr, n);
158}
159
160
/*
 * Check that a non-slab object does not improperly span independently
 * allocated pages.  Only active with CONFIG_HARDENED_USERCOPY_PAGESPAN;
 * otherwise this is a no-op.
 */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * The kernel image's data regions may not be marked Reserved
	 * (see the Reserved/CMA handling below), and [_sdata, _edata)
	 * does not necessarily cover rodata or bss, so each section
	 * range is checked explicitly first.
	 */

	/* Allow access to rodata, but only for reads (exposure to user). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow objects entirely inside the kernel data section. */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow objects entirely inside the kernel bss section. */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* An object wholly within one base page can never span pages. */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow objects contained within a single compound page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * The object spans distinct head pages.  That is tolerated only
	 * when every page in the range is uniformly Reserved (special or
	 * device memory) or uniformly CMA; anything else means it crosses
	 * independently allocated pages and is rejected.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	/* Walk the remaining pages, demanding the same classification. */
	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}
223
224static inline void check_heap_object(const void *ptr, unsigned long n,
225 bool to_user)
226{
227 struct page *page;
228
229 if (!virt_addr_valid(ptr))
230 return;
231
232 page = virt_to_head_page(ptr);
233
234 if (PageSlab(page)) {
235
236 __check_heap_object(ptr, n, page, to_user);
237 } else {
238
239 check_page_span(ptr, n, page, to_user);
240 }
241}
242
243
244
245
246
247
248
249void __check_object_size(const void *ptr, unsigned long n, bool to_user)
250{
251
252 if (!n)
253 return;
254
255
256 check_bogus_address((const unsigned long)ptr, n, to_user);
257
258
259 check_heap_object(ptr, n, to_user);
260
261
262 switch (check_stack_object(ptr, n)) {
263 case NOT_STACK:
264
265 break;
266 case GOOD_FRAME:
267 case GOOD_STACK:
268
269
270
271
272
273 return;
274 default:
275 usercopy_abort("process stack", NULL, to_user, 0, n);
276 }
277
278
279 check_kernel_text_object((const unsigned long)ptr, n, to_user);
280}
281EXPORT_SYMBOL(__check_object_size);
282