/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from
 *   the kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses
 *   to the metadata (e.g. count) are protected by this lock. This lock is
 *   also held when scanning the corresponding memory block to avoid the
 *   kernel freeing it via the kmemleak_free() callback. This is less
 *   heavyweight than holding a global lock like kmemleak_lock during
 *   scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory
 *   for unreferenced objects at a time. The gray_list contains the objects
 *   which are already referenced or marked as false positives and need to
 *   be scanned. This list is only modified during a scanning episode when
 *   the scan_mutex is held. At the end of a scan, the gray_list is always
 *   empty.
 *
 * The kmemleak_object structures have a use_count incremented or
 * decremented using the get_object()/put_object() functions. When the
 * use_count becomes 0, this count can no longer be incremented and
 * put_object() schedules the kmemleak_object freeing via an RCU callback.
 * All calls to the get_object() function must be protected by
 * rcu_read_lock() to avoid accessing a freed structure.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers that must be found before the block
	 * is no longer considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* if there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warning(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	int i, len, remaining;
	unsigned char linebuf[HEX_ROW_SIZE * 5];

	/* limit the number of lines to HEX_MAX_LINES */
	remaining = len =
		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));

	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
	for (i = 0; i < len; i += HEX_ROW_SIZE) {
		int linelen = min(remaining, HEX_ROW_SIZE);

		remaining -= HEX_ROW_SIZE;
		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
				   HEX_ASCII);
		seq_printf(seq, "    %s\n", linebuf);
	}
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
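
/*
 * Worked example of the coloring rules above (illustrative only): an object
 * registered via kmemleak_alloc(ptr, size, 1, gfp) starts each scan white
 * with count == 0. Every pointer to it found while scanning other memory
 * increments count; once count >= min_count (here 1) the object turns gray,
 * is added to gray_list and is itself scanned for further references. An
 * object whose min_count was painted to KMEMLEAK_BLACK (-1) satisfies
 * neither predicate, so it is never reported and never scanned.
 */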

/*
 * Objects are considered unreferenced only if their color is white
 * (orphan), they are still allocated and they were allocated at least
 * jiffies_min_age before the last scan.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free()
 * -> delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list
 * traversal is also safe.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warning("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object "
				      "search tree (overlaps existing)\n",
				      ptr);
			kmem_cache_free(object_cache, object);
			object = parent;
			spin_lock(&object->lock);
			dump_object_info(object);
			spin_unlock(&object->lock);
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	write_lock_irqsave(&kmemleak_lock, flags);
	rb_erase(&object->rb_node, &object_tree_root);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}
	__delete_object(object);

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem()
	 * and this happens before kmemleak_init() is called, so GFP_KERNEL
	 * is enough here.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object "
			      "at 0x%08lx as %s\n", ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black, i.e. ignore leak detection.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warning("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		/* SIZE_MAX means scan from ptr to the end of the object */
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such object will not be scanned by kmemleak but references to it
 * are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated percpu block by registering each CPU's copy.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
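
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * allocator wrapper pairing kmemleak_alloc() with kmemleak_free(). The
 * my_alloc()/my_free()/backend_*() names are made up; the slab, vmalloc and
 * percpu allocators already invoke these hooks internally.
 *
 *	static void *my_alloc(size_t size, gfp_t gfp)
 *	{
 *		void *p = backend_alloc(size, gfp);
 *
 *		if (p)
 *			kmemleak_alloc(p, size, 1, gfp);
 *		return p;
 *	}
 *
 *	static void my_free(void *p)
 *	{
 *		kmemleak_free(p);
 *		backend_free(p);
 *	}
 */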

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, GFP_KERNEL);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no
 * longer be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done
 * when it is known that the corresponding block is not a leak and does not
 * contain any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an
 * object contain references to other objects. Kmemleak will only scan these
 * areas, reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
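
/*
 * Annotation sketch (illustrative only; the names obj, buf, BUF_SIZE and
 * struct hdr are hypothetical): suppress a known false positive and narrow
 * scanning to the only pointer-bearing part of a buffer.
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_not_leak(obj);		// e.g. only reachable via hardware
 *
 *	buf = kmalloc(BUF_SIZE, GFP_KERNEL);
 *	// only the leading struct hdr contains pointers to other objects
 *	kmemleak_scan_area(buf, sizeof(struct hdr), GFP_KERNEL);
 */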

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	object->checksum = crc32(0, (void *)object->pointer, object->size);
	return object->checksum != old_csum;
}

/*
 * Memory scanning is a long process and it needs to be interruptable. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long flags;
		unsigned long pointer;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		pointer = *ptr;

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			list_add_tail(&object->gray_list, &gray_list);
			spin_unlock_irqrestore(&object->lock, flags);
			continue;
		}

		spin_unlock_irqrestore(&object->lock, flags);
		put_object(object);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
		       !(object->flags & OBJECT_NO_SCAN)) {
			scan_block(start, min(start + MAX_SCAN_SIZE, end),
				   object, 0);
			start += MAX_SCAN_SIZE;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		}
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is not done under lock since the elements from
	 * the gray_list are not removed in other paths. Only the scanning
	 * thread adds or removes entries, with scan_mutex held.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL, 0);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
	}
}
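
/*
 * In short, one kmemleak_scan() pass is a conservative mark phase: whiten
 * all objects, seed the gray list from the data/bss sections, per-cpu areas,
 * struct pages and (optionally) task stacks, propagate references via
 * scan_gray_list(), give white objects whose checksum changed one more
 * round, and finally report whatever is still white and old enough as a
 * suspected leak.
 */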

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. Takes the scan_mutex
 * so that the object list cannot change while the /sys/kernel/debug/kmemleak
 * file is being read.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

/*
 * Dump the object information for the address given in str (used by the
 * "dump=..." debugfs command below).
 */
static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future
 * and we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  stop it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
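
/*
 * Example interaction with the debugfs file registered below (shell session,
 * illustrative only):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak	(trigger a scan now)
 *	# cat /sys/kernel/debug/kmemleak		(list suspected leaks)
 *	# echo clear > /sys/kernel/debug/kmemleak	(grey out current reports)
 */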

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}

/*
 * Stop the memory scanning thread and, unless leaks were reported, free the
 * kmemleak internal objects. If leaks were found, keep the metadata so the
 * user can still inspect it and reclaim it later with the "clear" command.
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	mutex_lock(&scan_mutex);
	stop_scan_thread();

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. "
			"Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\"\n");
	mutex_unlock(&scan_mutex);
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

/*
 * Print the backtrace stored in an early_log entry (used when replaying an
 * early operation triggers a kmemleak warning).
 */
static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log >= ARRAY_SIZE(early_log))
		pr_warning("Early log buffer exceeded (%d), please increase "
			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		local_irq_restore(flags);
		return;
	} else
		kmemleak_enabled = 1;
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * allocations to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);