// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

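/*
 * Explicit checks, used by the kasan_check_read()/kasan_check_write()
 * annotations: validate a 'size'-byte access at 'p' against the shadow
 * and report a bug on failure.
 */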
bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

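/*
 * Instrumented replacements for memset()/memmove()/memcpy(): check the
 * source and destination ranges against the shadow first, then fall
 * through to the uninstrumented __mem*() implementations.
 */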
#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!check_memory_region((unsigned long)addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
	    !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
	    !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_GRANULE_SIZE.
 */
void poison_range(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	/*
	 * Perform the shadow offset calculation based on the untagged
	 * address, as some of the callers (e.g. kasan_poison_object_data)
	 * pass tagged addresses to this function.
	 */
	address = kasan_reset_tag(address);
	size = round_up(size, KASAN_GRANULE_SIZE);

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}

void unpoison_range(const void *address, size_t size)
{
	u8 tag = get_tag(address);

	/*
	 * Perform the shadow offset calculation based on the untagged
	 * address, as some of the callers (e.g. kasan_unpoison_object_data)
	 * pass tagged addresses to this function.
	 */
	address = kasan_reset_tag(address);

	poison_range(address, size, tag);

	if (size & KASAN_GRANULE_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
			*shadow = tag;
		else /* CONFIG_KASAN_GENERIC */
			*shadow = size & KASAN_GRANULE_MASK;
	}
}

#ifdef CONFIG_MEMORY_HOTPLUG
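/*
 * Check whether the shadow for @addr is already mapped by walking the
 * kernel page tables. Used below to avoid mapping shadow twice when
 * memory that was present at boot is onlined again.
 */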
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the first one is
	 * arch-specific, the latter depends on HUGETLB_PAGE. So let's abuse
	 * pud_bad(); if the pud is bad, it is bad because it is huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

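/*
 * Memory hotplug notifier: map shadow with __vmalloc_node_range() when a
 * block of memory goes online, and free it again with vfree() when the
 * onlining is cancelled or the block goes offline.
 */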
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
		WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is mapped already, then it must have been
		 * mapped during boot. This can happen when we are onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that it was
		 * the latter case.
		 *
		 * It is currently not possible to free shadow mapped during
		 * boot by kasan_init(): the code to do that has not been
		 * written yet, so that memory is simply leaked.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

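/*
 * Install a freshly allocated, KASAN_VMALLOC_INVALID-poisoned shadow page
 * at @addr. The page is allocated and initialized outside the page table
 * lock; if another CPU installed a pte for this address in the meantime,
 * the local page is freed again.
 */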
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(*ptep)))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(*ptep))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}

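/*
 * Populate the shadow for the vmalloc region [addr, addr + size) with
 * backing pages, rounding the shadow range out to page boundaries.
 */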
int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0				CPU#1
	 * WRITE_ONCE(p, vmalloc(100));		while (x = READ_ONCE(p)) ;
	 *					x[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0				CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call unpoison_range:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val
	 * // rest of vmalloc process
	 * STORE p, a				LOAD p, x
	 *					// instrumented access check:
	 *					LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(), which pairs with the address
	 * dependency on the reader side between the load of the pointer
	 * and the load of the shadow, so no barrier is needed here.
	 */
	return 0;
}

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	poison_range(start, size, KASAN_VMALLOC_INVALID);
}

void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	unpoison_range(start, size);
}

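/*
 * Tear down one shadow pte: clear it and free the backing page, taking
 * init_mm.page_table_lock to guard against a concurrent populate.
 */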
static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	if (likely(!pte_none(*ptep))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/*
 * Release the backing pages for the shadow of the vmalloc region
 * [start, end), which lies within the free region
 * [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not allowed to take any other global
 * locks (like mmap_lock or page_table_lock).
 *
 * One shadow page covers KASAN_MEMORY_PER_SHADOW_PAGE bytes of vmalloc
 * space, so the region's shadow does not necessarily start and end on
 * page boundaries. The start is therefore aligned up and the end down,
 * which yields the shadow pages covered exclusively by this region.
 *
 * A partially covered shadow page at either edge may be released too,
 * but only if every vmalloc address it covers lies inside the free
 * region, i.e. no live allocation can still be using that shadow page;
 * that is what the free_region_start/free_region_end comparisons below
 * decide.
 *
 * The ptes of the releasable shadow range are then cleared and their
 * backing pages freed via apply_to_existing_page_range(), followed by a
 * TLB flush of the released shadow range.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);
		flush_tlb_kernel_range((unsigned long)shadow_start,
				       (unsigned long)shadow_end);
	}
}

#else /* CONFIG_KASAN_VMALLOC */

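/*
 * When vmalloc shadow is not maintained (!CONFIG_KASAN_VMALLOC), module
 * shadow is allocated here at module load time: map shadow for the
 * region at @addr, initialize it to KASAN_SHADOW_INIT, and tag the vm
 * area VM_KASAN so that kasan_free_shadow() releases it on unload.
 */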
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
			KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

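/*
 * Free the shadow mapped by kasan_module_alloc() when the corresponding
 * vm area (marked VM_KASAN) is released.
 */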
void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#endif /* CONFIG_KASAN_VMALLOC */