/*
 * mm/kmemleak.c
 *
 * Kernel memory leak detector. See Documentation/kmemleak.txt for more
 * information on how to use it.
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * Locking (summary): the object_list and object_tree_root are protected by
 * kmemleak_lock (rwlock); each kmemleak_object has its own object->lock
 * spinlock protecting its fields; memory scanning and the debugfs interface
 * are serialized by scan_mutex. Objects are reference counted (use_count)
 * and freed via RCU.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
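
/*
 * Illustrative note (not part of the original source): for example, a
 * tracked allocation done with GFP_NOFS reaches kmemleak with gfp ==
 * GFP_NOFS; the mask above keeps only the bits that are also present in
 * GFP_KERNEL | GFP_ATOMIC and adds __GFP_NORETRY | __GFP_NOMEMALLOC |
 * __GFP_NOWARN, so kmemleak's own metadata allocations fail quietly
 * under memory pressure instead of retrying or invoking the OOM killer:
 *
 *	gfp_t meta_gfp = gfp_kmemleak_mask(GFP_NOFS | __GFP_HIGHMEM);
 */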

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the locking summary in the file header). These objects are
 * reference-counted (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before the block is a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a kmemleak warning was issued */
static atomic_t kmemleak_warning = ATOMIC_INIT(0);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on on the command line sets this, skipping the disable */
static int kmemleak_skip_disable;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warning(x);				\
	dump_stack();				\
	atomic_set(&kmemleak_warning, 1);	\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	int i, len, remaining;
	unsigned char linebuf[HEX_ROW_SIZE * 5];

	/* limit the number of lines to HEX_MAX_LINES */
	remaining = len =
		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));

	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
	for (i = 0; i < len; i += HEX_ROW_SIZE) {
		int linelen = min(remaining, HEX_ROW_SIZE);

		remaining -= HEX_ROW_SIZE;
		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
				   HEX_ASCII);
		seq_printf(seq, "    %s\n", linebuf);
	}
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects start out white (count == 0) and become gray or
 * remain white after the next memory scan depending on the number of
 * references found.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases when kmemleak warnings are triggered. It must be
 * called with the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also safe.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warning("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it.  However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock(&object->lock);
		dump_object_info(object);
		spin_unlock(&object->lock);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	write_lock_irqsave(&kmemleak_lock, flags);
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}
	__delete_object(object);

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}
678
679static void __paint_it(struct kmemleak_object *object, int color)
680{
681 object->min_count = color;
682 if (color == KMEMLEAK_BLACK)
683 object->flags |= OBJECT_NO_SCAN;
684}
685
686static void paint_it(struct kmemleak_object *object, int color)
687{
688 unsigned long flags;
689
690 spin_lock_irqsave(&object->lock, flags);
691 __paint_it(object, color);
692 spin_unlock_irqrestore(&object->lock, flags);
693}
694
695static void paint_ptr(unsigned long ptr, int color)
696{
697 struct kmemleak_object *object;
698
699 object = find_and_get_object(ptr, 0);
700 if (!object) {
701 kmemleak_warn("Trying to color unknown object "
702 "at 0x%08lx as %s\n", ptr,
703 (color == KMEMLEAK_GREY) ? "Grey" :
704 (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
705 return;
706 }
707 paint_it(object, color);
708 put_object(object);
709}
710
711
712
713
714
715static void make_gray_object(unsigned long ptr)
716{
717 paint_ptr(ptr, KMEMLEAK_GREY);
718}
719
720
721
722
723
724static void make_black_object(unsigned long ptr)
725{
726 paint_ptr(ptr, KMEMLEAK_BLACK);
727}
728
729
730
731
732
733static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
734{
735 unsigned long flags;
736 struct kmemleak_object *object;
737 struct kmemleak_scan_area *area;
738
739 object = find_and_get_object(ptr, 1);
740 if (!object) {
741 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
742 ptr);
743 return;
744 }
745
746 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
747 if (!area) {
748 pr_warning("Cannot allocate a scan area\n");
749 goto out;
750 }
751
752 spin_lock_irqsave(&object->lock, flags);
753 if (ptr + size > object->pointer + object->size) {
754 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
755 dump_object_info(object);
756 kmem_cache_free(scan_area_cache, area);
757 goto out_unlock;
758 }
759
760 INIT_HLIST_NODE(&area->node);
761 area->start = ptr;
762 area->size = size;
763
764 hlist_add_head(&area->node, &object->area_list);
765out_unlock:
766 spin_unlock_irqrestore(&object->lock, flags);
767out:
768 put_object(object);
769}
770
771
772
773
774
775
776static void object_no_scan(unsigned long ptr)
777{
778 unsigned long flags;
779 struct kmemleak_object *object;
780
781 object = find_and_get_object(ptr, 0);
782 if (!object) {
783 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
784 return;
785 }
786
787 spin_lock_irqsave(&object->lock, flags);
788 object->flags |= OBJECT_NO_SCAN;
789 spin_unlock_irqrestore(&object->lock, flags);
790 put_object(object);
791}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (atomic_read(&kmemleak_error)) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated percpu block and populate the stack trace.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
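
/*
 * Illustrative example (not from the original source): a hypothetical
 * allocator wrapper registering its blocks with kmemleak. The standard
 * kernel allocators (kmalloc, vmalloc, etc.) already contain equivalent
 * hooks, so ordinary code never needs to do this by hand.
 *
 *	void *my_alloc(size_t size, gfp_t gfp)
 *	{
 *		void *ptr = my_low_level_alloc(size, gfp);
 *
 *		if (ptr)
 *			kmemleak_alloc(ptr, size, 1, gfp);
 *		return ptr;
 *	}
 */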

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
 * allocation.
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, GFP_KERNEL);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
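
/*
 * Illustrative note (not from the original source) on the two annotations
 * above, for a hypothetical block that is reachable only through a pointer
 * kmemleak cannot see:
 *
 *	state = kmalloc(sizeof(*state), GFP_KERNEL);
 *	kmemleak_not_leak(state);	gray: never reported, but still
 *					scanned for references it holds
 *	kmemleak_ignore(state);		black: never reported and its
 *					contents are not scanned at all
 */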

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
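
/*
 * Illustrative example (not from the original source): a hypothetical
 * structure in which only an embedded array of pointers can legitimately
 * reference other allocations, so the rest of the block is excluded to
 * avoid random data masking real leaks:
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_scan_area(&obj->refs, sizeof(obj->refs), GFP_KERNEL);
 */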

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in special cases where it is known that the given object does not contain
 * any references to other objects. Kmemleak will not scan such objects
 * reducing the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	object->checksum = crc32(0, (void *)object->pointer, object->size);
	return object->checksum != old_csum;
}

/*
 * Memory scanning is a long process and it needs to be interruptable. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long flags;
		unsigned long pointer;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		pointer = *ptr;

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			list_add_tail(&object->gray_list, &gray_list);
			spin_unlock_irqrestore(&object->lock, flags);
			continue;
		}

		spin_unlock_irqrestore(&object->lock, flags);
		put_object(object);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. Either the whole
 * memory block or only the scan areas are scanned.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
		       !(object->flags & OBJECT_NO_SCAN)) {
			scan_block(start, min(start + MAX_SCAN_SIZE, end),
				   object, 0);
			start += MAX_SCAN_SIZE;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		}
	} else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is not done with list_for_each_entry() because
	 * new objects may be added to the gray_list by scan_object() while
	 * it is being traversed; the list end always points to the gray_list
	 * head.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node.
	 */
	lock_memory_hotplug();
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}
	unlock_memory_hotplug();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL, 0);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);

	list_for_each_continue_rcu(n, &object_list) {
		struct kmemleak_object *obj =
			list_entry(n, struct kmemleak_object, object_list);
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	addr = simple_strtoul(str, NULL, 0);
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future
 * and we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  stop it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects
 *		  as grey to ignore printing them
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = strict_strtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "clear", 5) == 0)
		kmemleak_clear();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};
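
/*
 * Illustrative usage from user space (not part of the original source),
 * matching the commands parsed in kmemleak_write() above:
 *
 *	# mount -t debugfs nodev /sys/kernel/debug/
 *	# echo scan > /sys/kernel/debug/kmemleak	(trigger a scan now)
 *	# cat /sys/kernel/debug/kmemleak		(list suspected leaks)
 *	# echo clear > /sys/kernel/debug/kmemleak	(mark them grey)
 *	# echo scan=600 > /sys/kernel/debug/kmemleak	(rescan every 600s)
 */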

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no scan thread was ever started (otherwise the objects are kept so that
 * the already collected leak information remains available).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	struct kmemleak_object *object;
	bool cleanup = scan_thread == NULL;

	mutex_lock(&scan_mutex);
	stop_scan_thread();

	if (cleanup) {
		rcu_read_lock();
		list_for_each_entry_rcu(object, &object_list, object_list)
			delete_object_full(object->pointer);
		rcu_read_unlock();
	}
	mutex_unlock(&scan_mutex);
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		schedule_work(&cleanup_work);

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak enabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
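
/*
 * Illustrative example (not part of the original source): the parameter is
 * passed on the kernel command line by the boot loader, e.g.:
 *
 *	linux ... kmemleak=off	(disable kmemleak for this boot)
 *	linux ... kmemleak=on	(keep kmemleak enabled even with
 *				 CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y)
 */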

static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		atomic_set(&kmemleak_early_log, 0);
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	if (crt_early_log >= ARRAY_SIZE(early_log))
		pr_warning("Early log buffer exceeded (%d), please increase "
			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	atomic_set(&kmemleak_early_log, 0);
	if (atomic_read(&kmemleak_error)) {
		local_irq_restore(flags);
		return;
	} else
		atomic_set(&kmemleak_enabled, 1);
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * allocations from the boot up to the object_list.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (atomic_read(&kmemleak_warning)) {
			print_log_trace(log);
			atomic_set(&kmemleak_warning, 0);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);