// SPDX-License-Identifier: GPL-2.0
/*
 * mm/kmemleak.c -- kernel memory leak detector
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list and the
 *   object_tree_root modifications. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated
 *   memory blocks. The object_tree_root is a red black tree used to
 *   look up metadata based on a pointer to the corresponding memory block.
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock.
 * - scan_mutex (mutex): ensures the serialization of memory scans and of
 *   the accesses to the /sys/kernel/debug/kmemleak file.
 *
 * The kmemleak_object structures have a use_count incremented or
 * decremented using the get_object()/put_object() functions. When the
 * use_count becomes 0, the structure is freed via an RCU grace period,
 * which allows lock-less traversal of the object_list.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
					   __GFP_NOLOCKDEP)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before the block is a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* protecting the access to object_list and object_tree_root */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* if there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as a false positive (min_count == 0) or
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *	     (min_count == -1). No function defined for this color.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
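
/*
 * Worked example (added for illustration, not part of the algorithm): an
 * object registered with kmemleak_alloc(ptr, size, 1, gfp) enters a scan
 * with count == 0 and min_count == 1, so color_white() is true and it is a
 * leak candidate. One discovered pointer makes count == 1 >= min_count:
 * color_gray() becomes true, the object joins the gray_list and is itself
 * scanned for further references. paint_it(object, KMEMLEAK_BLACK) sets
 * min_count to -1, after which both predicates stay false and
 * OBJECT_NO_SCAN additionally keeps the block out of the scan entirely.
 */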

/*
 * Objects are considered unreferenced only if their color is white, they
 * have not been reported and their creation is older than the minimum age
 * (to reduce false positives on transiently unreferenced, recently
 * allocated blocks).
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
			   object->comm, object->pid, object->jiffies,
			   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. Mainly used for debugging special
 * cases. Must be called with the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	stack_trace_print(object->trace, object->trace_len, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also safe.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Remove an object from the object_list and object_tree_root (no memory
 * freeing). Must be called with the kmemleak_lock held.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, &object_tree_root);
	list_del_rcu(&object->object_list);
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object)
		__remove_object(object);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	return stack_trace_save(trace, MAX_TRACE, 2);
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_hardirq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	raw_spin_lock_irqsave(&kmemleak_lock, flags);

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	min_addr = min(min_addr, untagged_ptr);
	max_addr = max(max_addr, untagged_ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	if (scan_area_cache)
		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false negatives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * struct vm_struct may be used as an alternative reference to the
 * vmalloc'ed object (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such object will not be scanned by kmemleak but references to it
 * are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
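
/*
 * Example (illustrative sketch, not from this file; helper names are
 * hypothetical): a subsystem with a custom allocator that carves blocks
 * out of a region the slab allocators never see must register and
 * unregister its blocks manually, otherwise pointers stored inside them
 * are invisible to the scanner:
 *
 *	void *my_pool_alloc(size_t size)
 *	{
 *		void *obj = my_pool_carve(size);	// hypothetical
 *
 *		if (obj)
 *			kmemleak_alloc(obj, size, 1, GFP_KERNEL);
 *		return obj;
 *	}
 *
 * The matching my_pool_free() would call kmemleak_free(obj) before
 * returning the block to the pool.
 */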

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);
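
/*
 * Example (illustrative, register name hypothetical): a buffer whose only
 * remaining reference is a physical address programmed into hardware has
 * no virtual pointer left in scannable memory, so kmemleak would report
 * it. Marking it as a false positive keeps it scanned but unreported:
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	writeq(virt_to_phys(buf), base + REG_BUF_ADDR);	// hypothetical reg
 *	kmemleak_not_leak(buf);
 */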

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
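
/*
 * Example (illustrative, struct and sizes hypothetical): if only the
 * header of a large, mostly-binary buffer holds real pointers, limiting
 * the scan avoids chasing payload bytes that merely look like pointers:
 *
 *	struct blob {
 *		struct list_head link;	// the only genuine pointers
 *		u8 payload[SZ_64K];	// opaque data, no references
 *	};
 *
 *	struct blob *b = kmalloc(sizeof(*b), GFP_KERNEL);
 *	if (b)
 *		kmemleak_scan_area(&b->link, sizeof(b->link), GFP_KERNEL);
 */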

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object.
 *		See kmemleak_alloc()
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	kasan_disable_current();
	kcsan_disable_current();
	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;
	unsigned long untagged_ptr;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
		kasan_enable_current();

		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		raw_spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			raw_spin_unlock(&object->lock);
		}
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}
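
/*
 * Worked example (added for illustration): on a 64-bit machine with
 * BYTES_PER_POINTER == 8, scan_block(_start = ...1004, _end = ...1020)
 * aligns the cursor up to ...1008 and stops at ...1019 (_end - 7), so
 * exactly the three fully-contained aligned words at ...1008, ...1010 and
 * ...1018 are loaded and tested against the [min_addr, max_addr) window
 * before the rbtree lookup. Unaligned or register-only pointers are never
 * found this way, which is one source of kmemleak false positives.
 */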

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list) ||
	    object->flags & OBJECT_FULL_SCAN) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is not done under lock since the elements from
	 * the gray_list are only added during memory scanning (before the
	 * scan_mutex is released). The kmemleak_scan_thread() is the only
	 * thread removing elements from this list.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			/* only scan pages belonging to this node */
			if (page_to_nid(page) != i)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan because of the new modified objects above.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = READ_ONCE(jiffies_scan_wait);

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds
 *		  (0 to stop it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned secs;
		unsigned long msecs;

		ret = kstrtouint(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;

		msecs = secs * MSEC_PER_SEC;
		if (msecs > UINT_MAX)
			msecs = UINT_MAX;

		stop_scan_thread();
		if (msecs) {
			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
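
/*
 * Example (user-space side, for illustration): a typical debugging session
 * drives the interface above from a shell:
 *
 *	# mount -t debugfs nodev /sys/kernel/debug/	(if not mounted yet)
 *	# echo scan > /sys/kernel/debug/kmemleak	(trigger a scan now)
 *	# cat /sys/kernel/debug/kmemleak		(read pending reports)
 *	# echo clear > /sys/kernel/debug/kmemleak	(drop current reports)
 *	# echo dump=0xffff8880075f8000 > /sys/kernel/debug/kmemleak
 *							(address hypothetical)
 */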

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, no need for RCU list traversal
	 * or kmemleak_lock held.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no previous scan thread (otherwise, kmemleak may still have some useful
 * information on memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to
	 * no longer track object freeing. Ordering of the scan thread
	 * stopping and the memory accesses below is guaranteed by the
	 * kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak enabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);