/*
 * Generic infrastructure for lifetime debugging of objects.
 */
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head list;
	raw_spinlock_t lock;
};

static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int obj_pool_min_free = ODEBUG_POOL_SIZE;
static int obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
static int obj_pool_max_used;
static struct kmem_cache *obj_cache;

static int debug_objects_maxchain __read_mostly;
static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly;
static int debug_objects_enabled __read_mostly = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int debug_objects_pool_size __read_mostly = ODEBUG_POOL_SIZE;
static int debug_objects_pool_min_level __read_mostly = ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr *descr_test __read_mostly;

/*
 * Statistics: number of debug objects allocated from and freed back to
 * the kmem_cache, reported via debugfs.
 */
static int debug_objects_alloc;
static int debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= debug_objects_pool_min_level))
		return;

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < debug_objects_pool_min_level) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		kmemleak_ignore(new);
		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		debug_objects_alloc++;
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

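/*
 * Lookup an object in the hash bucket. The caller must hold the bucket
 * lock.
 */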
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

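/*
 * Allocate a new object from the object pool. Called with interrupts
 * disabled and the hash bucket lock held.
 */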
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

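/*
 * Workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, freeing is skipped when
 * the lock is contended (trylock) and objects are freed in batches of
 * ODEBUG_FREE_BATCH per lock/unlock cycle.
 */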
#define ODEBUG_FREE_BATCH	4

static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *objs[ODEBUG_FREE_BATCH];
	unsigned long flags;
	int i;

	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;
	while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
		for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
			objs[i] = hlist_entry(obj_pool.first,
					      typeof(*objs[0]), node);
			hlist_del(&objs[i]->node);
		}

		obj_pool_free -= ODEBUG_FREE_BATCH;
		debug_objects_freed += ODEBUG_FREE_BATCH;
		/*
		 * Drop pool_lock across kmem_cache_free() to avoid
		 * contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		for (i = 0; i < ODEBUG_FREE_BATCH; i++)
			kmem_cache_free(obj_cache, objs[i]);
		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
			return;
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

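/*
 * Put the object back into the pool and schedule the workqueue to free
 * surplus objects if necessary.
 */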
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * Schedule the free work only when the pool is above its target
	 * size and the dedicated cache is initialized.
	 */
	if (obj_pool_free > debug_objects_pool_size && obj_cache)
		sched = keventd_up() && !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}

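/*
 * We ran out of memory: disable the tracker and return all tracked
 * objects to the pool.
 */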
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

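/*
 * The hash is taken over the chunk (page) portion of the address, so a
 * freed memory range can be checked bucket by bucket.
 */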
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

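/*
 * Invoke the state specific fixup callback (if any) and account the
 * number of successful fixups.
 */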
static int
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	int fixed = 0;

	if (fixup)
		fixed = fixup(addr, state);
	debug_objects_fixups += fixed;
	return fixed;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

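/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */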
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}

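/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */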
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}

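/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */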
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * The object is not tracked. This typically happens when a
	 * statically initialized object is activated. Let the type
	 * specific fixup callback sort it out; if it had to repair
	 * something, report the untracked activation.
	 */
	if (debug_object_fixup(descr->fixup_activate, addr,
			       ODEBUG_STATE_NOTAVAILABLE))
		debug_print_object(&o, "activate");
}

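/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */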
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

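/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */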
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

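/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */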
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

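/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */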
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * The object is not tracked; maybe it is statically
		 * initialized. Let the type specific fixup callback
		 * decide, and report if it had to repair anything.
		 */
		if (debug_object_fixup(descr->fixup_assert_init, addr,
				       ODEBUG_STATE_NOTAVAILABLE))
			debug_print_object(&o, "assert_init");
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

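/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */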
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

/**
 * debug_check_no_obj_freed - checks if freed objects are tracked
 * @address: start address of the freed memory area
 * @size: size of the freed memory area
 */
void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "objects_alloc :%d\n", debug_objects_alloc);
	seq_printf(m, "objects_freed :%d\n", debug_objects_freed);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long dummy1[6];
	int static_init;
	unsigned long dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			return 0;
		}
		return 1;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

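/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */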
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

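/*
 * Convert the statically allocated objects to dynamic ones.
 */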
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		kmemleak_ignore(obj);
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * debug_objects_mem_init() runs early in boot while only the
	 * boot CPU is up, so disabling interrupts locally is sufficient
	 * protection here.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

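/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug objects themselves, avoiding recursion.
 */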
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();

	/*
	 * Scale the pool thresholds with the number of possible CPUs,
	 * so that larger systems keep more spare objects around.
	 */
	debug_objects_pool_size += num_possible_cpus() * 32;
	debug_objects_pool_min_level += num_possible_cpus() * 4;
}