// SPDX-License-Identifier: GPL-2.0
/*
 * mm/kmemleak.c - kernel memory leak detector
 *
 * For usage and a description of the scanning algorithm see
 * Documentation/dev-tools/kmemleak.rst.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1
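/*
 * Metadata describing a tracked memory block. The object_list and rb_node
 * members are protected by kmemleak_lock; the remaining fields are protected
 * by the per-object lock. Objects are reference counted via use_count and
 * released through an RCU callback once the count drops to zero.
 */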
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before the object is a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)

#define HEX_PREFIX		" "
/* number of bytes to print per line */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see the colouring comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* protecting the access to object_list and object_tree_root */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on on the command line sets this, skipping the disable */
static int kmemleak_skip_disable;
/* if there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {		\
	if (seq)						\
		seq_printf(seq, fmt, ##__VA_ARGS__);		\
	else							\
		pr_warn(fmt, ##__VA_ARGS__);			\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Hex dump of an object's data to the seq file (or the kernel log). The
 * number of lines printed is limited to HEX_MAX_LINES to avoid seq file
 * spamming. Must be called with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
	kasan_enable_current();
}
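/*
 * Object colouring used by the scanning algorithm:
 * - white: count < min_count; not enough references were found, so the
 *   object is still a leak candidate.
 * - grey: count >= min_count (or min_count == KMEMLEAK_GREY); the object is
 *   referenced or deliberately not reported, and its contents are scanned
 *   for pointers to other objects.
 * - black: min_count == KMEMLEAK_BLACK; the object is neither scanned nor
 *   reported as a leak.
 */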
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they are
 * still allocated and they are older than jiffies_min_age relative to the
 * last memory scan.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Print the information for an unreferenced object. Must be called with the
 * object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			   object->pointer, object->size);
	warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
			   object->comm, object->pid, object->jiffies,
			   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, " backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		warn_or_seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. Must be called with the
 * object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice(" min_count = %d\n", object->min_count);
	pr_notice(" count = %d\n", object->count);
	pr_notice(" flags = 0x%x\n", object->flags);
	pr_notice(" checksum = %u\n", object->checksum);
	pr_notice(" backtrace:\n");
	stack_trace_print(object->trace, object->trace_len, 4);
}
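/*
 * Look up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of a memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */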
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise.
 * Once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}
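/*
 * Allocate a metadata object, preferably from the slab cache; fall back to
 * the static memory pool when the cache is not (yet) available.
 */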
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}
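/*
 * Decrement the object use_count. Once the count is 0, free the object via
 * an RCU callback so that concurrent (RCU protected) object_list traversals
 * never see a freed object.
 */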
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Remove an object from the object_list and object_tree_root. Must be called
 * with the kmemleak_lock held.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, &object_tree_root);
	list_del_rcu(&object->object_list);
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should
 * be at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object)
		__remove_object(object);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save a stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	return stack_trace_save(trace, MAX_TRACE, 2);
}
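/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */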
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	raw_spin_lock_irqsave(&kmemleak_lock, flags);

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	min_addr = min(min_addr, untagged_ptr);
	max_addr = max(max_addr, untagged_ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, new objects are created
 * for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	if (scan_area_cache)
		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to the
 * vm_struct may be used as an alternative reference to the vmalloc'ed block.
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are still searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
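/*
 * kmemleak_alloc - register a newly allocated object
 *
 * Called from the kernel allocators when a new object (memory block) is
 * allocated. @min_count is the minimum number of references that must be
 * found during scanning for the object not to be reported as a leak; 0 means
 * never report, -1 means ignore the object entirely.
 */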
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/*
 * kmemleak_alloc_percpu - register a newly allocated __percpu object.
 * Called from the kernel percpu allocator.
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/*
 * kmemleak_vmalloc - register a newly vmalloc'ed object.
 * Called from the vmalloc() kernel allocator.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
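/*
 * kmemleak_free - unregister a previously registered object.
 * Called from the kernel allocators when an object (memory block) is freed.
 */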
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/*
 * kmemleak_free_part - partially unregister a previously registered object.
 * Called when only part of a memory block is freed.
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/*
 * kmemleak_free_percpu - unregister a previously registered __percpu object.
 * Called from the kernel percpu allocator when an object is freed.
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/*
 * kmemleak_update_trace - override the object allocation stack trace for
 * cases where the actual allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/*
 * kmemleak_not_leak - mark an allocated object as a false positive so that it
 * is no longer reported as a leak but is still scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/*
 * kmemleak_ignore - ignore an allocated object: neither scan it nor report it
 * as a leak. Useful when the block is known not to contain references to
 * other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_ignore);

/*
 * kmemleak_scan_area - limit the range to be scanned in an allocated object.
 * Used when only certain parts of an object contain references to other
 * objects, reducing false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/*
 * kmemleak_no_scan - do not scan the given object. Useful when the block is
 * known not to contain any references to other objects.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
 * kmemleak_alloc_phys - similar to kmemleak_alloc() but taking a physical
 * address argument. Only lowmem addresses are registered.
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/*
 * kmemleak_free_part_phys - similar to kmemleak_free_part() but taking a
 * physical address argument.
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/*
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak() but taking a
 * physical address argument.
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/*
 * kmemleak_ignore_phys - similar to kmemleak_ignore() but taking a physical
 * address argument.
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}
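/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list. The kmemleak_lock is held for the duration of the
 * scan so that looked-up objects cannot be freed concurrently.
 */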
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;
	unsigned long untagged_ptr;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list.
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		raw_spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			raw_spin_unlock(&object->lock);
		}
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif
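/*
 * Scan the memory block corresponding to a kmemleak_object. Either the whole
 * block or only the individually registered scan areas are scanned, in
 * MAX_SCAN_SIZE chunks, dropping the object lock in between to reduce
 * latency.
 */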
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list) ||
	    object->flags & OBJECT_FULL_SCAN) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is not done under any lock since entries are
	 * only added to the list while holding the scan_mutex and new entries
	 * are always appended at the tail of the list.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}
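/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */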
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			/* only scan pages belonging to this node */
			if (page_to_nid(page) != i)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future
 * and we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);
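/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds
 *		  (0 to stop the automatic scanning)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced objects as grey, or
 *		  free all kmemleak objects if kmemleak has been disabled
 *   dump=...	- dump information about the object found at the given address
 */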
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, no need for RCU list traversal
	 * or kmemleak_lock held.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no leaks were found (otherwise keep the information around so it can be
 * inspected or cleared from the debugfs file).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to
	 * no longer track object freeing. Ordering of the scan thread
	 * stopping and the memory accesses below is guaranteed by the
	 * kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time configuration via the "kmemleak=" kernel parameter.
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
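/*
 * Kmemleak initialization: set up the internal caches and register the
 * data/bss sections as grey objects so that pointers stored there are
 * followed during scanning but the sections themselves are never reported.
 */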
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);