/*
 * This implements the various checks for copy_to_user() and copy_from_user()
 * bounds checking.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>

/*
 * Checks whether a given pointer and length are entirely within the
 * current process stack, and whether the object fits within a valid
 * stack frame (when frame checking is available).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if the object is safely within a valid stack frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}

/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_to_user() or copy_from_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, copies are allowed only within the whitelisted range
 * of bytes for a given cache (set via the cache's usersize and useroffset
 * fields). To adjust a cache whitelist, use the usercopy-aware cache
 * create functions.
 */
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len)
{
	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);
}

void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) over steps into [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear physical memory mapping,
	 * in which case __pa() is not just the reverse of __va(). The
	 * linear alias can be detected and checked as well.
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping of the kernel text. */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
					bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + n < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if the entire object is inside the same compound page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Reject if range is entirely either Reserved (i.e. special or
	 * device memory), or CMA. Otherwise, reject since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return;

	page = virt_to_head_page(ptr);

	if (PageSlab(page)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, page, to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, page, to_user);
	}
}

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not a bogus address
 * - fully contained by the stack (or stack frame, when available)
 * - fully within the SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user, 0, n);
	}

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	return strtobool(str, &enable_checks);
}

__setup("hardened_usercopy=", parse_hardened_usercopy);

static int __init set_hardened_usercopy(void)
{
	if (!enable_checks)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);