// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

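/*
 * Explicit shadow checks: validate a read or write of @size bytes at @p.
 * An invalid access is reported and false is returned.
 */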
bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return kasan_check_range((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return kasan_check_range((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
	    !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
	    !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}

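/*
 * Mark the range [addr, addr + size) as inaccessible by writing @value to the
 * corresponding shadow memory. Both addr and size must be aligned to
 * KASAN_GRANULE_SIZE; KFENCE-managed addresses are skipped.
 */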
void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	void *shadow_start, *shadow_end;

	if (!kasan_arch_is_ready())
		return;

	/*
	 * Perform the shadow offset calculation based on the untagged
	 * address, as some of the callers (e.g. kasan_poison_object_data)
	 * pass tagged addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	shadow_start = kasan_mem_to_shadow(addr);
	shadow_end = kasan_mem_to_shadow(addr + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}
EXPORT_SYMBOL(kasan_poison);

#ifdef CONFIG_KASAN_GENERIC
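/*
 * For the generic mode, poison the part of the last granule that lies beyond
 * size: the shadow byte is set to the number of accessible bytes in that
 * granule.
 */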
void kasan_poison_last_granule(const void *addr, size_t size)
{
	if (!kasan_arch_is_ready())
		return;

	if (size & KASAN_GRANULE_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
		*shadow = size & KASAN_GRANULE_MASK;
	}
}
#endif

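/*
 * Mark the range [addr, addr + size) as accessible. The size is rounded up to
 * granule size; for the generic mode the last, partially covered granule is
 * then re-poisoned to its exact accessible length.
 */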
void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	/*
	 * Perform the shadow offset calculation based on the untagged
	 * address, as some of the callers (e.g. kasan_unpoison_object_data)
	 * pass tagged addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	/*
	 * Skip KFENCE memory if called explicitly outside of sl*b. Also note
	 * that calls to ksize(), where size is not a multiple of machine-word
	 * size, would otherwise poison the invalid portion of the word.
	 */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;

	/* Unpoison all granules up to size. */
	kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false);

	/* Partially poison the last granule for the generic mode. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(addr, size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
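/* Walk the kernel page tables to check whether the shadow of @addr is mapped. */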
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * A present but "bad" (non-table) entry at this level means the
	 * range is covered by a huge mapping, so the shadow is mapped.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

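/*
 * Memory hotplug notifier: populate the shadow for memory going online and
 * free shadow that was allocated here once the memory goes offline again.
 */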
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
		WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it was mapped during boot.
		 * This can happen when onlining previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * The shadow was either mapped during boot by kasan_init() or
		 * during memory online by __vmalloc_node_range() above. In
		 * the latter case find_vm_area() returns a non-NULL result
		 * and the shadow can be freed with vfree().
		 *
		 * Freeing shadow that was mapped during boot is not currently
		 * supported, so in that case the memory is simply leaked.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

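/*
 * apply_to_page_range() callback: back one shadow PTE with a freshly
 * allocated page filled with KASAN_VMALLOC_INVALID. The PTE is installed
 * under init_mm.page_table_lock so that a concurrent populator wins cleanly
 * and the loser frees its page.
 */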
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(*ptep)))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(*ptep))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}

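/*
 * Ensure the shadow covering the vmalloc region [addr, addr + size) is backed
 * by real pages. Newly mapped shadow starts out as KASAN_VMALLOC_INVALID
 * until the region is unpoisoned.
 */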
int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * The new shadow must be fully populated and visible to other CPUs
	 * before the vmalloc result is published (for example via
	 * WRITE_ONCE(p, vmalloc(...))); otherwise another CPU could load the
	 * pointer and have its instrumented accesses consult shadow that
	 * still looks unmapped or poisoned.
	 *
	 * For vmalloc() itself, the required ordering is provided by the
	 * smp_wmb() in clear_vm_uninitialized_flag(). For get_vm_area() and
	 * the per-cpu allocator, the caller only gets the shadow populated
	 * here; mapping real pages into the area later takes and releases a
	 * page-table lock, which provides the barrier.
	 */

	return 0;
}

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	kasan_poison(start, size, KASAN_VMALLOC_INVALID, false);
}

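/* Unpoison the shadow for a freshly allocated vmalloc region. */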
void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	kasan_unpoison(start, size, false);
}

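/*
 * apply_to_existing_page_range() callback: clear one shadow PTE and free the
 * page backing it, under init_mm.page_table_lock to avoid racing with
 * kasan_populate_vmalloc_pte().
 */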
static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	if (likely(!pte_none(*ptep))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which lies within
 * the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It may be called
 * with vmap area locks held, so it must not block or allocate (hence
 * apply_to_existing_page_range()).
 *
 * The shadow of a page-aligned region is not necessarily page-aligned itself.
 * Shadow pages that are entirely covered by the region are always released.
 * A shadow page that is only partially covered is released only when the rest
 * of the memory it describes lies within the surrounding free region, i.e.
 * when no live allocation can still need that shadow page. This is why
 * region_start/region_end are first aligned inwards and then extended
 * outwards whenever the free region covers the partial shadow page.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);
		flush_tlb_kernel_range((unsigned long)shadow_start,
				       (unsigned long)shadow_end);
	}
}

#else

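/*
 * Without CONFIG_KASAN_VMALLOC, shadow for the module area is allocated here
 * when a module is loaded and freed again by kasan_free_shadow() when the
 * module's mapping goes away.
 */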
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
				KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#endif