// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 */
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/uaccess.h>

#include "kasan.h"
#include "../slab.h"
static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline unsigned int filter_irq_stacks(unsigned long *entries,
					     unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

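/*
 * Entry points for explicit memory-access checks; exported so that code
 * outside this file can invoke KASAN checks on an arbitrary region.
 */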
bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

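/*
 * Interceptors for the string functions: check the accessed regions first,
 * then fall through to the real (uninstrumented) __mem*() implementations.
 */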
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	u8 tag = get_tag(address);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	kasan_poison_shadow(address, size, tag);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
			*shadow = tag;
		else
			*shadow = size & KASAN_SHADOW_MASK;
	}
}

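/* Unpoison the task's stack from its base up to the given stack pointer. */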
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

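/* Tag the pages of a newly allocated page block and unpoison its shadow. */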
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return 0;

	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

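/*
 * Reserve extra space in each object of the cache for KASAN metadata
 * (allocation and free tracking) and a redzone, and mark the cache with
 * SLAB_KASAN. If the metadata does not fit, the cache is left unchanged.
 */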
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

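/*
 * Reset the page tags and poison the whole slab page; individual objects are
 * unpoisoned later, as they get allocated.
 */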
void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < (1 << compound_order(page)); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next to
 *    each other get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the previous tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object,
				assign_tag(cache, object, true, false));

	return (void *)object;
}

static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return shadow_byte < 0 ||
			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;

	/* else CONFIG_KASAN_SW_TAGS: */
	if ((u8)shadow_byte == KASAN_TAG_INVALID)
		return true;
	if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
		return true;

	return false;
}

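/*
 * Poison a freed object and, when the quarantine is used, record the free
 * stack and put the object into the quarantine. Returns true if the caller
 * must skip the actual freeing (invalid free, or the object is quarantined).
 */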
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_invalid(tag, shadow_byte)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

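/*
 * Common allocation hook: unpoison 'size' bytes of the object, poison the
 * rest of the object as a redzone, assign a tag (for tag-based KASAN) and
 * record the allocation stack trace.
 */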
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
	kasan_unpoison_shadow(set_tag(object, tag), size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				    KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}

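/*
 * Allocate and initialize the shadow memory backing a module allocation, so
 * that accesses to the module area can be checked like any other memory.
 */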
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

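/*
 * kasan_report() may be called with the user-access state (e.g. SMAP) open;
 * save and restore it around the actual report generation.
 */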
extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);

void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
	unsigned long flags = user_access_save();

	__kasan_report(addr, size, is_write, ip);
	user_access_restore(flags);
}

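/*
 * Memory hotplug support: shadow for hot-added memory is mapped when the
 * memory goes online and freed again when it goes offline.
 */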
#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(), the first one is
	 * arch-specific, the last one depends on HUGETLB_PAGE.  So let's abuse
	 * pud_bad(): if the pud is bad, it is bad because it is huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it must have been mapped
		 * during boot. This can happen when onlining previously
		 * offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us which of the
		 * two cases applies.
		 *
		 * Currently it's not possible to free shadow mapped during
		 * boot by kasan_init(): the code to do that hasn't been
		 * implemented yet.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif