1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72
73#include <linux/init.h>
74#include <linux/kernel.h>
75#include <linux/list.h>
76#include <linux/sched.h>
77#include <linux/jiffies.h>
78#include <linux/delay.h>
79#include <linux/export.h>
80#include <linux/kthread.h>
81#include <linux/rbtree.h>
82#include <linux/fs.h>
83#include <linux/debugfs.h>
84#include <linux/seq_file.h>
85#include <linux/cpumask.h>
86#include <linux/spinlock.h>
87#include <linux/mutex.h>
88#include <linux/rcupdate.h>
89#include <linux/stacktrace.h>
90#include <linux/cache.h>
91#include <linux/percpu.h>
92#include <linux/hardirq.h>
93#include <linux/mmzone.h>
94#include <linux/slab.h>
95#include <linux/thread_info.h>
96#include <linux/err.h>
97#include <linux/uaccess.h>
98#include <linux/string.h>
99#include <linux/nodemask.h>
100#include <linux/mm.h>
101#include <linux/workqueue.h>
102#include <linux/crc32.h>
103
104#include <asm/sections.h>
105#include <asm/processor.h>
106#include <linux/atomic.h>
107
108#include <linux/kasan.h>
109#include <linux/kmemcheck.h>
110#include <linux/kmemleak.h>
111#include <linux/memory_hotplug.h>
112
113
114
115
/* maximum number of stack trace entries recorded per object */
#define MAX_TRACE		16
/* minimum age (ms) an object must reach before it is reported as a leak */
#define MSECS_MIN_AGE		5000
/* delay (s) before the first automatic memory scan */
#define SECS_FIRST_SCAN		60
/* interval (s) between automatic memory scans */
#define SECS_SCAN_WAIT		600
/* maximum number of bytes scanned before rescheduling */
#define MAX_SCAN_SIZE		4096

#define BYTES_PER_POINTER	sizeof(void *)

/*
 * GFP mask for kmemleak's own metadata allocations: keep only the caller's
 * KERNEL/ATOMIC bits and never retry, dip into reserves or warn, so that
 * tracking allocations cannot make memory pressure worse.
 */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
128
129
/*
 * Sub-region of a tracked object to be scanned for pointers. Entries hang
 * off kmemleak_object::area_list; when any area is present, only those
 * areas are scanned instead of the whole object (see scan_object()).
 */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;	/* start address of the area */
	size_t size;		/* length of the area in bytes */
};
135
#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Metadata describing one tracked memory block. Objects live on the global
 * object_list (modifications under kmemleak_lock, traversal under RCU), are
 * indexed by address in object_tree_root and are reference-counted via
 * use_count; the final put_object() frees the structure after an RCU grace
 * period (free_object_rcu()).
 */
struct kmemleak_object {
	spinlock_t lock;		/* protects the mutable fields below */
	unsigned long flags;		/* OBJECT_* bits */
	struct list_head object_list;	/* node in the global object_list */
	struct list_head gray_list;	/* node in gray_list during scanning */
	struct rb_node rb_node;		/* node in object_tree_root */
	struct rcu_head rcu;		/* used for object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;		/* start address of the tracked block */
	size_t size;			/* size of the tracked block */
	/* minimum number of references needed for the block not to be a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum over the object contents, used to detect modification */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];	/* allocation stack trace */
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the allocating task */
	char comm[TASK_COMM_LEN];	/* executable name of the allocator */
};
172
173
/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line of a hex dump; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed per object */
#define HEX_MAX_LINES		2
188
189
/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set once initialisation has completed (set outside this chunk) */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* automatic scanning thread; NULL when not running */
static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scans */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debugfs interface access */
static DEFINE_MUTEX(scan_mutex);
/* NOTE(review): appears to suppress disabling at init — set outside this chunk */
static int kmemleak_skip_disable;
/* set when leaks were found during the last scan */
static bool kmemleak_found_leaks;
233
234
235
236
237
238
239
240
241
242
/*
 * Kmemleak operation type for early logging. The callbacks may be invoked
 * before kmemleak is fully initialised; such calls are recorded in an early
 * log buffer and replayed later, once the object caches exist.
 */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging, sufficient to replay the operation later.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace at log time */
	unsigned int trace_len;		/* stack trace length */
};
267
268
/* early logging buffer and current position; discarded after initialisation */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);
274
275
276
277
/*
 * Print a warning and dump the stack trace; also records that a kmemleak
 * warning occurred (kmemleak_warning flag).
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)
293
294
295
296
297
298
299
300static void hex_dump_object(struct seq_file *seq,
301 struct kmemleak_object *object)
302{
303 const u8 *ptr = (const u8 *)object->pointer;
304 size_t len;
305
306
307 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
308
309 seq_printf(seq, " hex dump (first %zu bytes):\n", len);
310 seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
311 HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
312}
313
314
315
316
317
318
319
320
321
322
323
324static bool color_white(const struct kmemleak_object *object)
325{
326 return object->count != KMEMLEAK_BLACK &&
327 object->count < object->min_count;
328}
329
330static bool color_gray(const struct kmemleak_object *object)
331{
332 return object->min_count != KMEMLEAK_BLACK &&
333 object->count >= object->min_count;
334}
335
336
337
338
339
340
/*
 * Objects are considered unreferenced only if their color is white, they
 * have not been deleted and have a minimum age to avoid false positives
 * caused by pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}
347
348
349
350
351
/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	/* one line per recorded stack trace entry */
	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}
371
372
373
374
375
376
/*
 * Print the kmemleak_object information to the kernel log. This function
 * must be called with the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}
395
396
397
398
399
400
401
/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			/* ptr falls inside the block but aliasing not allowed */
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}
424
425
426
427
428
429
430
/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise.
 * Note that once an object's use_count reached 0, the RCU freeing was
 * already registered and the object should no longer be referenced.
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}
435
436
437
438
/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}
456
457
458
459
460
461
462
463
/*
 * Decrement the object use_count. Once the count is 0, free the object via
 * an RCU callback. Since put_object() may be called via the kmemleak_free()
 * callback, the corresponding memory block cannot be freed here directly
 * (freeing is deferred to free_object_rcu()).
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}
474
475
476
477
/*
 * Look up an object in the object search tree and increment its use_count.
 * Returns NULL if not found or the reference could not be taken.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}
495
496
497
498
499
500
/*
 * Look up an object in the object search tree and remove it from both the
 * object_tree_root and object_list. The returned object's use_count should
 * be at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}
516
517
518
519
520static int __save_stack_trace(unsigned long *trace)
521{
522 struct stack_trace stack_trace;
523
524 stack_trace.max_entries = MAX_TRACE;
525 stack_trace.nr_entries = 0;
526 stack_trace.entries = trace;
527 stack_trace.skip = 2;
528 save_stack_trace(&stack_trace);
529
530 return stack_trace.nr_entries;
531}
532
533
534
535
536
/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root. Returns
 * the new object or NULL on allocation failure / overlapping insertion (the
 * latter disables kmemleak via kmemleak_stop()).
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			/* new block overlaps an already tracked one: fatal */
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}
619
620
621
622
/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 * The object must already have been removed from the tree/list (see
 * find_and_remove_object()).
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
639
640
641
642
643
/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}
658
659
660
661
662
663
/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the remaining part(s)
 * of the block are re-registered as new object(s).
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem()
	 * and this happens before kmemleak_init() is called — NOTE(review):
	 * assumption carried from callers outside this chunk; the object
	 * (re-)creation below is safe either way since the original was
	 * removed from the tree above.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}
696
/* set the object color; black objects are additionally excluded from scanning */
static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}
703
/* locked wrapper around __paint_it() */
static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}
712
/* look up the object at ptr and paint it with the given color */
static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}
728
729
730
731
732
/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}
746
747
748
749
750
/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 * A size of SIZE_MAX means "from ptr to the end of the object".
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		/* requested area sticks out of the object: reject it */
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}
790
791
792
793
794
795
/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such object will not be scanned by kmemleak but references to it
 * are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
812
813
814
815
816
/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized. On buffer overflow
 * kmemleak is disabled; crt_early_log keeps counting so the overflow can be
 * reported later.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		crt_early_log++;
		kmemleak_disable();
		return;
	}

	/*
	 * NOTE(review): interrupts are disabled around the buffer update for
	 * consistency; early boot is effectively single-threaded, so no
	 * further locking is visible here.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}
849
850
851
852
/*
 * Log an early allocated block and populate the stack trace recorded at
 * early-log time (rather than the replay-time trace).
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}
878
879
880
881
/*
 * Log an early allocated percpu block: replay one early_alloc() per
 * possible CPU, rewriting log->ptr to each CPU's copy.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated. Before full initialisation the call is
 * recorded in the early log instead.
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
918
919
920
921
922
923
924
925
926
927
/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
947
948
949
950
951
952
953
954
/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
965
966
967
968
969
970
971
972
973
974
/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed.
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);
985
986
987
988
989
990
991
992
/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1007
1008
1009
1010
1011
1012
1013
1014
/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace with the current one, for
 * objects whose real allocation site is elsewhere.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);
1041
1042
1043
1044
1045
1046
1047
1048
/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no
 * longer be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done
 * when it is known that the corresponding block is not a leak and does not
 * contain any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area (SIZE_MAX scans to the object's end)
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an
 * object contain references to other objects. Kmemleak will only scan
 * these areas, reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block.
 * Useful for blocks that are known not to contain any references to other
 * objects (e.g. DMA buffers); references *to* the block are still counted.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
1121
1122
1123
1124
/*
 * Update an object's checksum and return true if it was modified since the
 * last update. KASAN checking is temporarily disabled since the object
 * contents are read verbatim, possibly past valid red zones.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}
1138
1139
1140
1141
1142
1143static int scan_should_stop(void)
1144{
1145 if (!kmemleak_enabled)
1146 return 1;
1147
1148
1149
1150
1151
1152 if (current->mm)
1153 return signal_pending(current);
1154 else
1155 return kthread_should_stop();
1156
1157 return 0;
1158}
1159
1160
1161
1162
1163
/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list. This function is invoked with kmemleak_lock read-
 * locked for the whole block so looked-up objects cannot be freed.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	read_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;

		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		/* the word itself may be inside a KASAN red zone */
		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		/* quick reject: value outside any tracked block's range */
		if (pointer < min_addr || pointer >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non white, no need to be updated */
			spin_unlock(&object->lock);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will
		 * be added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			/* put_object() called when removing from gray_list */
			WARN_ON(!get_object(object));
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock(&object->lock);
	}
	read_unlock_irqrestore(&kmemleak_lock, flags);
}
1233
1234
1235
1236
1237static void scan_large_block(void *start, void *end)
1238{
1239 void *next;
1240
1241 while (start < end) {
1242 next = min(start + MAX_SCAN_SIZE, end);
1243 scan_block(start, next, NULL);
1244 start = next;
1245 cond_resched();
1246 }
1247}
1248
1249
1250
1251
1252
/*
 * Scan a memory block corresponding to a kmemleak_object. Either the whole
 * object or, if scan areas were registered, only those areas are scanned.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			/*
			 * Drop the lock to reschedule between chunks; recheck
			 * OBJECT_ALLOCATED afterwards since the block may
			 * have been freed meanwhile.
			 */
			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}
1293
1294
1295
1296
1297
/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented when
	 * they were added to the gray list.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}
1326
1327
1328
1329
1330
1331
/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's: reset colour, seed the gray list */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_large_block(_sdata, _edata);
	scan_large_block(__bss_start, __bss_stop);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node; memory hotplug is held off so
	 * pfn ranges stay valid.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan because of the new modified objects above.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}

}
1470
1471
1472
1473
1474
/*
 * Thread function performing automatic memory scanning. Unreferenced
 * objects at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan, interruptible so the thread can stop */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}
1506
1507
1508
1509
1510
/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}
1521
1522
1523
1524
1525
/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}
1533
1534
1535
1536
1537
1538
/*
 * Iterate over the object_list and return the first valid object at the
 * required position with its use_count incremented. The function triggers
 * a memory scanning when the pos argument points to the first position.
 * Holds the rcu_read_lock until kmemleak_seq_stop() on success.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}
1560
1561
1562
1563
1564
/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}
1583
1584
1585
1586
/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}
1600
1601
1602
1603
/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}
1615
/* seq_file iterator over the reported (unreferenced) objects */
static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

/* open handler for the debugfs kmemleak file */
static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}
1627
1628static int dump_str_object_info(const char *str)
1629{
1630 unsigned long flags;
1631 struct kmemleak_object *object;
1632 unsigned long addr;
1633
1634 if (kstrtoul(str, 0, &addr))
1635 return -EINVAL;
1636 object = find_and_get_object(addr, 0);
1637 if (!object) {
1638 pr_info("Unknown object at 0x%08lx\n", addr);
1639 return -EINVAL;
1640 }
1641
1642 spin_lock_irqsave(&object->lock, flags);
1643 dump_object_info(object);
1644 spin_unlock_irqrestore(&object->lock, flags);
1645
1646 put_object(object);
1647 return 0;
1648}
1649
1650
1651
1652
1653
1654
1655
1656static void kmemleak_clear(void)
1657{
1658 struct kmemleak_object *object;
1659 unsigned long flags;
1660
1661 rcu_read_lock();
1662 list_for_each_entry_rcu(object, &object_list, object_list) {
1663 spin_lock_irqsave(&object->lock, flags);
1664 if ((object->flags & OBJECT_REPORTED) &&
1665 unreferenced_object(object))
1666 __paint_it(object, KMEMLEAK_GREY);
1667 spin_unlock_irqrestore(&object->lock, flags);
1668 }
1669 rcu_read_unlock();
1670
1671 kmemleak_found_leaks = false;
1672}
1673
1674static void __kmemleak_do_cleanup(void);
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds
 *		  (0 to stop it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey, or delete all kmemleak objects if kmemleak has been
 *		  disabled
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	/* copy at most sizeof(buf) - 1 bytes and NUL-terminate explicitly */
	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	/* "clear" is accepted even after kmemleak has been disabled */
	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * NOTE: the prefix matches below are order-sensitive: "scan=on" and
	 * "scan=off" must be tested before "scan=", which in turn must come
	 * before the plain "scan" command.
	 */
	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
1758
/* file operations for /sys/kernel/debug/kmemleak */
static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
1767
/*
 * Delete all the kmemleak internal objects, freeing kmemleak's metadata.
 * Called either from the deferred cleanup work or via the "clear" debugfs
 * command once kmemleak has been disabled.
 */
static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	/*
	 * delete_object_full() removes entries while we iterate; the RCU
	 * traversal keeps the walk safe across those removals.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}
1777
1778
1779
1780
1781
1782
/*
 * Deferred clean-up after kmemleak_disable(): stop the scan thread, stop
 * acting on kmemleak_free() callbacks and, if no leaks were reported, free
 * the internal metadata. If leaks were found, the data is kept around for
 * inspection and must be reclaimed manually (see pr_info below).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	/*
	 * Once kmemleak_free_enabled is cleared, freed blocks are no longer
	 * tracked - presumably kmemleak_free() becomes a no-op from here on
	 * (TODO(review): confirm against the kmemleak_free() definition).
	 */
	kmemleak_free_enabled = 0;

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}
1799
/* work item scheduled by kmemleak_disable()/kmemleak_late_init() */
static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1801
1802
1803
1804
1805
/*
 * Disable kmemleak. No allocation/freeing tracing is performed once this
 * function is called. Safe to call multiple times; only the first call has
 * any effect.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether kmemleak is already disabled */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* the cleanup work needs a fully booted kernel; before that, just
	 * stop acting on free callbacks */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}
1823
1824
1825
1826
1827static int kmemleak_boot_config(char *str)
1828{
1829 if (!str)
1830 return -EINVAL;
1831 if (strcmp(str, "off") == 0)
1832 kmemleak_disable();
1833 else if (strcmp(str, "on") == 0)
1834 kmemleak_skip_disable = 1;
1835 else
1836 return -EINVAL;
1837 return 0;
1838}
1839early_param("kmemleak", kmemleak_boot_config);
1840
/*
 * Print the stack trace recorded in an early_log entry, i.e. for callbacks
 * logged before kmemleak was fully initialised (replayed in kmemleak_init()).
 */
static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	/* only nr_entries/entries are needed by print_stack_trace() */
	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	/* the second argument is the indentation level for the output */
	print_stack_trace(&trace, 2);
}
1851
1852
1853
1854
/*
 * Kmemleak initialisation: create the metadata caches, flip the enable
 * flags and replay the callbacks recorded in the early log buffer.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	/* default-off build: stay disabled unless "kmemleak=on" was given */
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	/* SLAB_NOLEAKTRACE so kmemleak does not track its own metadata */
	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log > ARRAY_SIZE(early_log))
		pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
			crt_early_log);

	/* disable IRQs while switching from early logging to live tracking
	 * so no callback observes a half-updated flag state */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		/* kmemleak_disable() ran during the early phase; bail out */
		local_irq_restore(flags);
		return;
	} else {
		kmemleak_enabled = 1;
		kmemleak_free_enabled = 1;
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Replay the early
	 * logged callbacks as real kmemleak operations.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		/* a warning was raised while replaying this entry; show the
		 * backtrace recorded at early-log time */
		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}
1937
1938
1939
1940
/*
 * Late initialisation: create the debugfs control file and start the
 * automatic memory scanning thread.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warn("Failed to create the debugfs kmemleak file\n");
	/* scan_mutex serializes against the debugfs scan=on/off commands */
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);
1971