/*
 * kernel/locking/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at all.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies runtime.
 */
28#define DISABLE_BRANCH_PROFILING
29#include <linux/mutex.h>
30#include <linux/sched.h>
31#include <linux/sched/clock.h>
32#include <linux/sched/task.h>
33#include <linux/sched/mm.h>
34#include <linux/delay.h>
35#include <linux/module.h>
36#include <linux/proc_fs.h>
37#include <linux/seq_file.h>
38#include <linux/spinlock.h>
39#include <linux/kallsyms.h>
40#include <linux/interrupt.h>
41#include <linux/stacktrace.h>
42#include <linux/debug_locks.h>
43#include <linux/irqflags.h>
44#include <linux/utsname.h>
45#include <linux/hash.h>
46#include <linux/ftrace.h>
47#include <linux/stringify.h>
48#include <linux/bitops.h>
49#include <linux/gfp.h>
50#include <linux/kmemcheck.h>
51#include <linux/random.h>
52#include <linux/jhash.h>
53
54#include <asm/sections.h>
55
56#include "lockdep_internals.h"
57
58#define CREATE_TRACE_POINTS
59#include <trace/events/lock.h>
60
61#ifdef CONFIG_PROVE_LOCKING
62int prove_locking = 1;
63module_param(prove_locking, int, 0644);
64#else
65#define prove_locking 0
66#endif
67
68#ifdef CONFIG_LOCK_STAT
69int lock_stat = 1;
70module_param(lock_stat, int, 0644);
71#else
72#define lock_stat 0
73#endif
74
/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code.
 */
83static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
84
85static int graph_lock(void)
86{
87 arch_spin_lock(&lockdep_lock);
 /*
  * Make sure that if another CPU detected a bug while
  * walking the graph we also let it print its bug messages
  * first, before we throw away the graph here.
  */
94 if (!debug_locks) {
95 arch_spin_unlock(&lockdep_lock);
96 return 0;
97 }
98
99 current->lockdep_recursion++;
100 return 1;
101}
102
103static inline int graph_unlock(void)
104{
105 if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
 /*
  * The lockdep graph lock isn't locked while we expect it to
  * be, we're confused now, bye!
  */
110 return DEBUG_LOCKS_WARN_ON(1);
111 }
112
113 current->lockdep_recursion--;
114 arch_spin_unlock(&lockdep_lock);
115 return 0;
116}
117
/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and return 1 if we turned it off.
 */
122static inline int debug_locks_off_graph_unlock(void)
123{
124 int ret = debug_locks_off();
125
126 arch_spin_unlock(&lockdep_lock);
127
128 return ret;
129}
130
131unsigned long nr_list_entries;
132static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
133
/*
 * All data structures here are protected by the global debug_lock.
 *
 * nr_lock_classes is the number of elements of lock_classes[] that is
 * in use.
 */
140unsigned long nr_lock_classes;
141static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
142
143static inline struct lock_class *hlock_class(struct held_lock *hlock)
144{
145 if (!hlock->class_idx) {
 /*
  * Someone passed in garbage, we give up.
  */
149 DEBUG_LOCKS_WARN_ON(1);
150 return NULL;
151 }
152 return lock_classes + hlock->class_idx - 1;
153}
154
155#ifdef CONFIG_LOCK_STAT
156static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
157
158static inline u64 lockstat_clock(void)
159{
160 return local_clock();
161}
162
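/*
 * Record the instruction pointer of a contention point in the per-class
 * points[] array and return its slot index:
 */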
163static int lock_point(unsigned long points[], unsigned long ip)
164{
165 int i;
166
167 for (i = 0; i < LOCKSTAT_POINTS; i++) {
168 if (points[i] == 0) {
169 points[i] = ip;
170 break;
171 }
172 if (points[i] == ip)
173 break;
174 }
175
176 return i;
177}
178
179static void lock_time_inc(struct lock_time *lt, u64 time)
180{
181 if (time > lt->max)
182 lt->max = time;
183
184 if (time < lt->min || !lt->nr)
185 lt->min = time;
186
187 lt->total += time;
188 lt->nr++;
189}
190
191static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
192{
193 if (!src->nr)
194 return;
195
196 if (src->max > dst->max)
197 dst->max = src->max;
198
199 if (src->min < dst->min || !dst->nr)
200 dst->min = src->min;
201
202 dst->total += src->total;
203 dst->nr += src->nr;
204}
205
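/*
 * Aggregate the per-cpu statistics of a lock class into one
 * lock_class_stats snapshot:
 */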
206struct lock_class_stats lock_stats(struct lock_class *class)
207{
208 struct lock_class_stats stats;
209 int cpu, i;
210
211 memset(&stats, 0, sizeof(struct lock_class_stats));
212 for_each_possible_cpu(cpu) {
213 struct lock_class_stats *pcs =
214 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
215
216 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
217 stats.contention_point[i] += pcs->contention_point[i];
218
219 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
220 stats.contending_point[i] += pcs->contending_point[i];
221
222 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
223 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
224
225 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
226 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
227
228 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
229 stats.bounces[i] += pcs->bounces[i];
230 }
231
232 return stats;
233}
234
235void clear_lock_stats(struct lock_class *class)
236{
237 int cpu;
238
239 for_each_possible_cpu(cpu) {
240 struct lock_class_stats *cpu_stats =
241 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
242
243 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
244 }
245 memset(class->contention_point, 0, sizeof(class->contention_point));
246 memset(class->contending_point, 0, sizeof(class->contending_point));
247}
248
249static struct lock_class_stats *get_lock_stats(struct lock_class *class)
250{
251 return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
252}
253
254static void put_lock_stats(struct lock_class_stats *stats)
255{
256 put_cpu_var(cpu_lock_stats);
257}
258
259static void lock_release_holdtime(struct held_lock *hlock)
260{
261 struct lock_class_stats *stats;
262 u64 holdtime;
263
264 if (!lock_stat)
265 return;
266
267 holdtime = lockstat_clock() - hlock->holdtime_stamp;
268
269 stats = get_lock_stats(hlock_class(hlock));
270 if (hlock->read)
271 lock_time_inc(&stats->read_holdtime, holdtime);
272 else
273 lock_time_inc(&stats->write_holdtime, holdtime);
274 put_lock_stats(stats);
275}
276#else
277static inline void lock_release_holdtime(struct held_lock *hlock)
278{
279}
280#endif
281
/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock lock held.
 */
287LIST_HEAD(all_lock_classes);
288
/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
292#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
293#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
294#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
295#define classhashentry(key) (classhash_table + __classhashfn((key)))
296
297static struct hlist_head classhash_table[CLASSHASH_SIZE];
298
/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
303#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
304#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
305#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
306#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
307
308static struct hlist_head chainhash_table[CHAINHASH_SIZE];
309
/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
316static inline u64 iterate_chain_key(u64 key, u32 idx)
317{
318 u32 k0 = key, k1 = key >> 32;
319
320 __jhash_mix(idx, k0, k1);
321
322 return k0 | (u64)k1 << 32;
323}
324
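/*
 * lockdep_off()/lockdep_on() temporarily disable/re-enable lockdep
 * processing for the current task by bumping its recursion count:
 */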
325void lockdep_off(void)
326{
327 current->lockdep_recursion++;
328}
329EXPORT_SYMBOL(lockdep_off);
330
331void lockdep_on(void)
332{
333 current->lockdep_recursion--;
334}
335EXPORT_SYMBOL(lockdep_on);
336
337
338
339
340
341#define VERBOSE 0
342#define VERY_VERBOSE 0
343
344#if VERBOSE
345# define HARDIRQ_VERBOSE 1
346# define SOFTIRQ_VERBOSE 1
347# define RECLAIM_VERBOSE 1
348#else
349# define HARDIRQ_VERBOSE 0
350# define SOFTIRQ_VERBOSE 0
351# define RECLAIM_VERBOSE 0
352#endif
353
354#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
355
356
357
358static int class_filter(struct lock_class *class)
359{
360#if 0
361
362 if (class->name_version == 1 &&
363 !strcmp(class->name, "lockname"))
364 return 1;
365 if (class->name_version == 1 &&
366 !strcmp(class->name, "&struct->lockfield"))
367 return 1;
368#endif
369
370 return 0;
371}
372#endif
373
374static int verbose(struct lock_class *class)
375{
376#if VERBOSE
377 return class_filter(class);
378#endif
379 return 0;
380}
381
/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
386unsigned long nr_stack_trace_entries;
387static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
388
389static void print_lockdep_off(const char *bug_msg)
390{
391 printk(KERN_DEBUG "%s\n", bug_msg);
392 printk(KERN_DEBUG "turning off the locking correctness validator.\n");
393#ifdef CONFIG_LOCK_STAT
394 printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
395#endif
396}
397
398static int save_trace(struct stack_trace *trace)
399{
400 trace->nr_entries = 0;
401 trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
402 trace->entries = stack_trace + nr_stack_trace_entries;
403
404 trace->skip = 3;
405
406 save_stack_trace(trace);
407
 /*
  * Some architectures append ULONG_MAX (-1) to indicate a full trace;
  * strip that sentinel so it doesn't eat into the entry budget.
  */
415 if (trace->nr_entries != 0 &&
416 trace->entries[trace->nr_entries-1] == ULONG_MAX)
417 trace->nr_entries--;
418
419 trace->max_entries = trace->nr_entries;
420
421 nr_stack_trace_entries += trace->nr_entries;
422
423 if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
424 if (!debug_locks_off_graph_unlock())
425 return 0;
426
427 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
428 dump_stack();
429
430 return 0;
431 }
432
433 return 1;
434}
435
436unsigned int nr_hardirq_chains;
437unsigned int nr_softirq_chains;
438unsigned int nr_process_chains;
439unsigned int max_lockdep_depth;
440
441#ifdef CONFIG_DEBUG_LOCKDEP
442
443
444
445DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
446#endif
447
448
449
450
451
452#define __USAGE(__STATE) \
453 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
454 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
455 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
456 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
457
458static const char *usage_str[] =
459{
460#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
461#include "lockdep_states.h"
462#undef LOCKDEP_STATE
463 [LOCK_USED] = "INITIAL USE",
464};
465
466const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
467{
468 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
469}
470
471static inline unsigned long lock_flag(enum lock_usage_bit bit)
472{
473 return 1UL << bit;
474}
475
476static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
477{
478 char c = '.';
479
480 if (class->usage_mask & lock_flag(bit + 2))
481 c = '+';
482 if (class->usage_mask & lock_flag(bit)) {
483 c = '-';
484 if (class->usage_mask & lock_flag(bit + 2))
485 c = '?';
486 }
487
488 return c;
489}
490
491void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
492{
493 int i = 0;
494
495#define LOCKDEP_STATE(__STATE) \
496 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
497 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
498#include "lockdep_states.h"
499#undef LOCKDEP_STATE
500
501 usage[i] = '\0';
502}
503
504static void __print_lock_name(struct lock_class *class)
505{
506 char str[KSYM_NAME_LEN];
507 const char *name;
508
509 name = class->name;
510 if (!name) {
511 name = __get_key_name(class->key, str);
512 printk(KERN_CONT "%s", name);
513 } else {
514 printk(KERN_CONT "%s", name);
515 if (class->name_version > 1)
516 printk(KERN_CONT "#%d", class->name_version);
517 if (class->subclass)
518 printk(KERN_CONT "/%d", class->subclass);
519 }
520}
521
522static void print_lock_name(struct lock_class *class)
523{
524 char usage[LOCK_USAGE_CHARS];
525
526 get_usage_chars(class, usage);
527
528 printk(KERN_CONT " (");
529 __print_lock_name(class);
530 printk(KERN_CONT "){%s}", usage);
531}
532
533static void print_lockdep_cache(struct lockdep_map *lock)
534{
535 const char *name;
536 char str[KSYM_NAME_LEN];
537
538 name = lock->name;
539 if (!name)
540 name = __get_key_name(lock->key->subkeys, str);
541
542 printk(KERN_CONT "%s", name);
543}
544
545static void print_lock(struct held_lock *hlock)
546{
547
548
549
550
551 unsigned int class_idx = hlock->class_idx;
552
553
554 barrier();
555
556 if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
557 printk(KERN_CONT "<RELEASED>\n");
558 return;
559 }
560
561 print_lock_name(lock_classes + class_idx - 1);
562 printk(KERN_CONT ", at: [<%p>] %pS\n",
563 (void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
564}
565
566static void lockdep_print_held_locks(struct task_struct *curr)
567{
568 int i, depth = curr->lockdep_depth;
569
570 if (!depth) {
571 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
572 return;
573 }
574 printk("%d lock%s held by %s/%d:\n",
575 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
576
577 for (i = 0; i < depth; i++) {
578 printk(" #%d: ", i);
579 print_lock(curr->held_locks + i);
580 }
581}
582
583static void print_kernel_ident(void)
584{
585 printk("%s %.*s %s\n", init_utsname()->release,
586 (int)strcspn(init_utsname()->version, " "),
587 init_utsname()->version,
588 print_tainted());
589}
590
591static int very_verbose(struct lock_class *class)
592{
593#if VERY_VERBOSE
594 return class_filter(class);
595#endif
596 return 0;
597}
598
599
600
601
602#ifdef __KERNEL__
603static int static_obj(void *obj)
604{
605 unsigned long start = (unsigned long) &_stext,
606 end = (unsigned long) &_end,
607 addr = (unsigned long) obj;
608
609
610
611
612 if ((addr >= start) && (addr < end))
613 return 1;
614
615 if (arch_is_kernel_data(addr))
616 return 1;
617
618
619
620
621 if (is_kernel_percpu_address(addr))
622 return 1;
623
624
625
626
627 return is_module_address(addr) || is_module_percpu_address(addr);
628}
629#endif
630
631
632
633
634
635static int count_matching_names(struct lock_class *new_class)
636{
637 struct lock_class *class;
638 int count = 0;
639
640 if (!new_class->name)
641 return 0;
642
643 list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
644 if (new_class->key - new_class->subclass == class->key)
645 return class->name_version;
646 if (class->name && !strcmp(class->name, new_class->name))
647 count = max(count, class->name_version);
648 }
649
650 return count + 1;
651}
652
/*
 * Look up a lock's class in the hash-table. Returns the class if it has
 * already been registered, NULL if the (static) key has not been hashed
 * yet, or an ERR_PTR if the lock is not backed by static storage and
 * thus cannot be used without an explicit key.
 */
658static inline struct lock_class *
659look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
660{
661 struct lockdep_subclass_key *key;
662 struct hlist_head *hash_head;
663 struct lock_class *class;
664 bool is_static = false;
665
666 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
667 debug_locks_off();
668 printk(KERN_ERR
669 "BUG: looking up invalid subclass: %u\n", subclass);
670 printk(KERN_ERR
671 "turning off the locking correctness validator.\n");
672 dump_stack();
673 return NULL;
674 }
675
 /*
  * Static locks do not have their class-keys yet - for them the key
  * is the lock object itself. If the lock is in the per cpu area,
  * the canonical address of the lock (per cpu offset removed) is
  * used.
  */
682 if (unlikely(!lock->key)) {
683 unsigned long can_addr, addr = (unsigned long)lock;
684
685 if (__is_kernel_percpu_address(addr, &can_addr))
686 lock->key = (void *)can_addr;
687 else if (__is_module_percpu_address(addr, &can_addr))
688 lock->key = (void *)can_addr;
689 else if (static_obj(lock))
690 lock->key = (void *)lock;
691 else
692 return ERR_PTR(-EINVAL);
693 is_static = true;
694 }
695
 /*
  * NOTE: the class-key must be unique. For dynamic locks, a static
  * lock_class_key variable is passed in through the mutex_init()
  * (or spin_lock_init()) call - which acts as the key. For static
  * locks we use the lock object itself as the key.
  */
702 BUILD_BUG_ON(sizeof(struct lock_class_key) >
703 sizeof(struct lockdep_map));
704
705 key = lock->key->subkeys + subclass;
706
707 hash_head = classhashentry(key);
708
709
710
711
712 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
713 return NULL;
714
715 hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
716 if (class->key == key) {
717
718
719
720
721 WARN_ON_ONCE(class->name != lock->name);
722 return class;
723 }
724 }
725
726 return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
727}
728
/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
734static struct lock_class *
735register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
736{
737 struct lockdep_subclass_key *key;
738 struct hlist_head *hash_head;
739 struct lock_class *class;
740
741 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
742
743 class = look_up_lock_class(lock, subclass);
744 if (likely(!IS_ERR_OR_NULL(class)))
745 goto out_set_class_cache;
746
747
748
749
750 if (IS_ERR(class)) {
751 debug_locks_off();
752 printk("INFO: trying to register non-static key.\n");
753 printk("the code is fine but needs lockdep annotation.\n");
754 printk("turning off the locking correctness validator.\n");
755 dump_stack();
756 return NULL;
757 }
758
759 key = lock->key->subkeys + subclass;
760 hash_head = classhashentry(key);
761
762 if (!graph_lock()) {
763 return NULL;
764 }
765
766
767
768
769 hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
770 if (class->key == key)
771 goto out_unlock_set;
772 }
773
774
775
776
777
778 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
779 if (!debug_locks_off_graph_unlock()) {
780 return NULL;
781 }
782
783 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
784 dump_stack();
785 return NULL;
786 }
787 class = lock_classes + nr_lock_classes++;
788 debug_atomic_inc(nr_unused_locks);
789 class->key = key;
790 class->name = lock->name;
791 class->subclass = subclass;
792 INIT_LIST_HEAD(&class->lock_entry);
793 INIT_LIST_HEAD(&class->locks_before);
794 INIT_LIST_HEAD(&class->locks_after);
795 class->name_version = count_matching_names(class);
796
797
798
799
800 hlist_add_head_rcu(&class->hash_entry, hash_head);
801
802
803
804 list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
805
806 if (verbose(class)) {
807 graph_unlock();
808
809 printk("\nnew class %p: %s", class->key, class->name);
810 if (class->name_version > 1)
811 printk(KERN_CONT "#%d", class->name_version);
812 printk(KERN_CONT "\n");
813 dump_stack();
814
815 if (!graph_lock()) {
816 return NULL;
817 }
818 }
819out_unlock_set:
820 graph_unlock();
821
822out_set_class_cache:
823 if (!subclass || force)
824 lock->class_cache[0] = class;
825 else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
826 lock->class_cache[subclass] = class;
827
828
829
830
831
832 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
833 return NULL;
834
835 return class;
836}
837
838#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
843static struct lock_list *alloc_list_entry(void)
844{
845 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
846 if (!debug_locks_off_graph_unlock())
847 return NULL;
848
849 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
850 dump_stack();
851 return NULL;
852 }
853 return list_entries + nr_list_entries++;
854}
855
/*
 * Add a new dependency to the head of the list:
 */
859static int add_lock_to_list(struct lock_class *this, struct list_head *head,
860 unsigned long ip, int distance,
861 struct stack_trace *trace)
862{
863 struct lock_list *entry;
864
865
866
867
868 entry = alloc_list_entry();
869 if (!entry)
870 return 0;
871
872 entry->class = this;
873 entry->distance = distance;
874 entry->trace = *trace;
875
876
877
878
879
880 list_add_tail_rcu(&entry->entry, head);
881
882 return 1;
883}
884
/*
 * Keep the queue size a power of 2 so index wrapping is a simple mask:
 */
888#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
889#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
890
/*
 * The circular_queue and helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can find
 * the shortest path from the next lock to be acquired to a
 * previously held lock if a circular dependency exists between them.
 */
897struct circular_queue {
898 unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
899 unsigned int front, rear;
900};
901
902static struct circular_queue lock_cq;
903
904unsigned int max_bfs_queue_depth;
905
906static unsigned int lockdep_dependency_gen_id;
907
908static inline void __cq_init(struct circular_queue *cq)
909{
910 cq->front = cq->rear = 0;
911 lockdep_dependency_gen_id++;
912}
913
914static inline int __cq_empty(struct circular_queue *cq)
915{
916 return (cq->front == cq->rear);
917}
918
919static inline int __cq_full(struct circular_queue *cq)
920{
921 return ((cq->rear + 1) & CQ_MASK) == cq->front;
922}
923
924static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
925{
926 if (__cq_full(cq))
927 return -1;
928
929 cq->element[cq->rear] = elem;
930 cq->rear = (cq->rear + 1) & CQ_MASK;
931 return 0;
932}
933
934static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
935{
936 if (__cq_empty(cq))
937 return -1;
938
939 *elem = cq->element[cq->front];
940 cq->front = (cq->front + 1) & CQ_MASK;
941 return 0;
942}
943
944static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
945{
946 return (cq->rear - cq->front) & CQ_MASK;
947}
948
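/*
 * Mark a lock_list entry as visited in the current BFS pass by stamping
 * its class with the current dependency generation id:
 */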
949static inline void mark_lock_accessed(struct lock_list *lock,
950 struct lock_list *parent)
951{
952 unsigned long nr;
953
954 nr = lock - list_entries;
955 WARN_ON(nr >= nr_list_entries);
956 lock->parent = parent;
957 lock->class->dep_gen_id = lockdep_dependency_gen_id;
958}
959
960static inline unsigned long lock_accessed(struct lock_list *lock)
961{
962 unsigned long nr;
963
964 nr = lock - list_entries;
965 WARN_ON(nr >= nr_list_entries);
966 return lock->class->dep_gen_id == lockdep_dependency_gen_id;
967}
968
969static inline struct lock_list *get_lock_parent(struct lock_list *child)
970{
971 return child->parent;
972}
973
974static inline int get_lock_depth(struct lock_list *child)
975{
976 int depth = 0;
977 struct lock_list *parent;
978
979 while ((parent = get_lock_parent(child))) {
980 child = parent;
981 depth++;
982 }
983 return depth;
984}
985
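/*
 * Breadth-first search over the dependency graph, starting at
 * @source_entry and following locks_after (forward) or locks_before
 * (backward) edges. Returns 0 and sets *@target_entry when @match
 * succeeds, 1 if no match was found, and <0 on internal error:
 */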
986static int __bfs(struct lock_list *source_entry,
987 void *data,
988 int (*match)(struct lock_list *entry, void *data),
989 struct lock_list **target_entry,
990 int forward)
991{
992 struct lock_list *entry;
993 struct list_head *head;
994 struct circular_queue *cq = &lock_cq;
995 int ret = 1;
996
997 if (match(source_entry, data)) {
998 *target_entry = source_entry;
999 ret = 0;
1000 goto exit;
1001 }
1002
1003 if (forward)
1004 head = &source_entry->class->locks_after;
1005 else
1006 head = &source_entry->class->locks_before;
1007
1008 if (list_empty(head))
1009 goto exit;
1010
1011 __cq_init(cq);
1012 __cq_enqueue(cq, (unsigned long)source_entry);
1013
1014 while (!__cq_empty(cq)) {
1015 struct lock_list *lock;
1016
1017 __cq_dequeue(cq, (unsigned long *)&lock);
1018
1019 if (!lock->class) {
1020 ret = -2;
1021 goto exit;
1022 }
1023
1024 if (forward)
1025 head = &lock->class->locks_after;
1026 else
1027 head = &lock->class->locks_before;
1028
1029 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1030
1031 list_for_each_entry_rcu(entry, head, entry) {
1032 if (!lock_accessed(entry)) {
1033 unsigned int cq_depth;
1034 mark_lock_accessed(entry, lock);
1035 if (match(entry, data)) {
1036 *target_entry = entry;
1037 ret = 0;
1038 goto exit;
1039 }
1040
1041 if (__cq_enqueue(cq, (unsigned long)entry)) {
1042 ret = -1;
1043 goto exit;
1044 }
1045 cq_depth = __cq_get_elem_count(cq);
1046 if (max_bfs_queue_depth < cq_depth)
1047 max_bfs_queue_depth = cq_depth;
1048 }
1049 }
1050 }
1051exit:
1052 return ret;
1053}
1054
1055static inline int __bfs_forwards(struct lock_list *src_entry,
1056 void *data,
1057 int (*match)(struct lock_list *entry, void *data),
1058 struct lock_list **target_entry)
1059{
1060 return __bfs(src_entry, data, match, target_entry, 1);
1061
1062}
1063
1064static inline int __bfs_backwards(struct lock_list *src_entry,
1065 void *data,
1066 int (*match)(struct lock_list *entry, void *data),
1067 struct lock_list **target_entry)
1068{
1069 return __bfs(src_entry, data, match, target_entry, 0);
1070
1071}
1072
/*
 * Forwards- and backwards-direction dependency checking, used both for
 * non-circularity checking and for the hardirq-unsafe/softirq-unsafe
 * validation below.
 */

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
1083static noinline int
1084print_circular_bug_entry(struct lock_list *target, int depth)
1085{
1086 if (debug_locks_silent)
1087 return 0;
1088 printk("\n-> #%u", depth);
1089 print_lock_name(target->class);
1090 printk(KERN_CONT ":\n");
1091 print_stack_trace(&target->trace, 6);
1092
1093 return 0;
1094}
1095
1096static void
1097print_circular_lock_scenario(struct held_lock *src,
1098 struct held_lock *tgt,
1099 struct lock_list *prt)
1100{
1101 struct lock_class *source = hlock_class(src);
1102 struct lock_class *target = hlock_class(tgt);
1103 struct lock_class *parent = prt->class;
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118 if (parent != source) {
1119 printk("Chain exists of:\n ");
1120 __print_lock_name(source);
1121 printk(KERN_CONT " --> ");
1122 __print_lock_name(parent);
1123 printk(KERN_CONT " --> ");
1124 __print_lock_name(target);
1125 printk(KERN_CONT "\n\n");
1126 }
1127
1128 printk(" Possible unsafe locking scenario:\n\n");
1129 printk(" CPU0 CPU1\n");
1130 printk(" ---- ----\n");
1131 printk(" lock(");
1132 __print_lock_name(target);
1133 printk(KERN_CONT ");\n");
1134 printk(" lock(");
1135 __print_lock_name(parent);
1136 printk(KERN_CONT ");\n");
1137 printk(" lock(");
1138 __print_lock_name(target);
1139 printk(KERN_CONT ");\n");
1140 printk(" lock(");
1141 __print_lock_name(source);
1142 printk(KERN_CONT ");\n");
1143 printk("\n *** DEADLOCK ***\n\n");
1144}
1145
/*
 * When a circular dependency is detected, print the
 * header first:
 */
1150static noinline int
1151print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1152 struct held_lock *check_src,
1153 struct held_lock *check_tgt)
1154{
1155 struct task_struct *curr = current;
1156
1157 if (debug_locks_silent)
1158 return 0;
1159
1160 pr_warn("\n");
1161 pr_warn("======================================================\n");
1162 pr_warn("WARNING: possible circular locking dependency detected\n");
1163 print_kernel_ident();
1164 pr_warn("------------------------------------------------------\n");
1165 pr_warn("%s/%d is trying to acquire lock:\n",
1166 curr->comm, task_pid_nr(curr));
1167 print_lock(check_src);
1168 pr_warn("\nbut task is already holding lock:\n");
1169 print_lock(check_tgt);
1170 pr_warn("\nwhich lock already depends on the new lock.\n\n");
1171 pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1172
1173 print_circular_bug_entry(entry, depth);
1174
1175 return 0;
1176}
1177
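/* Match helper for __bfs(): does this entry refer to the target class? */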
1178static inline int class_equal(struct lock_list *entry, void *data)
1179{
1180 return entry->class == data;
1181}
1182
1183static noinline int print_circular_bug(struct lock_list *this,
1184 struct lock_list *target,
1185 struct held_lock *check_src,
1186 struct held_lock *check_tgt)
1187{
1188 struct task_struct *curr = current;
1189 struct lock_list *parent;
1190 struct lock_list *first_parent;
1191 int depth;
1192
1193 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1194 return 0;
1195
1196 if (!save_trace(&this->trace))
1197 return 0;
1198
1199 depth = get_lock_depth(target);
1200
1201 print_circular_bug_header(target, depth, check_src, check_tgt);
1202
1203 parent = get_lock_parent(target);
1204 first_parent = parent;
1205
1206 while (parent) {
1207 print_circular_bug_entry(parent, --depth);
1208 parent = get_lock_parent(parent);
1209 }
1210
1211 printk("\nother info that might help us debug this:\n\n");
1212 print_circular_lock_scenario(check_src, check_tgt,
1213 first_parent);
1214
1215 lockdep_print_held_locks(curr);
1216
1217 printk("\nstack backtrace:\n");
1218 dump_stack();
1219
1220 return 0;
1221}
1222
1223static noinline int print_bfs_bug(int ret)
1224{
1225 if (!debug_locks_off_graph_unlock())
1226 return 0;
1227
1228
1229
1230
1231 WARN(1, "lockdep bfs error:%d\n", ret);
1232
1233 return 0;
1234}
1235
1236static int noop_count(struct lock_list *entry, void *data)
1237{
1238 (*(unsigned long *)data)++;
1239 return 0;
1240}
1241
1242static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1243{
1244 unsigned long count = 0;
1245 struct lock_list *uninitialized_var(target_entry);
1246
1247 __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1248
1249 return count;
1250}
1251unsigned long lockdep_count_forward_deps(struct lock_class *class)
1252{
1253 unsigned long ret, flags;
1254 struct lock_list this;
1255
1256 this.parent = NULL;
1257 this.class = class;
1258
1259 local_irq_save(flags);
1260 arch_spin_lock(&lockdep_lock);
1261 ret = __lockdep_count_forward_deps(&this);
1262 arch_spin_unlock(&lockdep_lock);
1263 local_irq_restore(flags);
1264
1265 return ret;
1266}
1267
1268static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1269{
1270 unsigned long count = 0;
1271 struct lock_list *uninitialized_var(target_entry);
1272
1273 __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1274
1275 return count;
1276}
1277
1278unsigned long lockdep_count_backward_deps(struct lock_class *class)
1279{
1280 unsigned long ret, flags;
1281 struct lock_list this;
1282
1283 this.parent = NULL;
1284 this.class = class;
1285
1286 local_irq_save(flags);
1287 arch_spin_lock(&lockdep_lock);
1288 ret = __lockdep_count_backward_deps(&this);
1289 arch_spin_unlock(&lockdep_lock);
1290 local_irq_restore(flags);
1291
1292 return ret;
1293}
1294
/*
 * Prove that the dependency graph starting at <root> can not
 * lead to <target>. Print an error and return 0 if it does.
 */
1299static noinline int
1300check_noncircular(struct lock_list *root, struct lock_class *target,
1301 struct lock_list **target_entry)
1302{
1303 int result;
1304
1305 debug_atomic_inc(nr_cyclic_checks);
1306
1307 result = __bfs_forwards(root, target, class_equal, target_entry);
1308
1309 return result;
1310}
1311
1312#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1313
1314
1315
1316
1317
1318
1319static inline int usage_match(struct lock_list *entry, void *bit)
1320{
1321 return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1322}
1323
/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
1336static int
1337find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1338 struct lock_list **target_entry)
1339{
1340 int result;
1341
1342 debug_atomic_inc(nr_find_usage_forwards_checks);
1343
1344 result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1345
1346 return result;
1347}
1348
/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
1359static int
1360find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1361 struct lock_list **target_entry)
1362{
1363 int result;
1364
1365 debug_atomic_inc(nr_find_usage_backwards_checks);
1366
1367 result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1368
1369 return result;
1370}
1371
1372static void print_lock_class_header(struct lock_class *class, int depth)
1373{
1374 int bit;
1375
1376 printk("%*s->", depth, "");
1377 print_lock_name(class);
1378 printk(KERN_CONT " ops: %lu", class->ops);
1379 printk(KERN_CONT " {\n");
1380
1381 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1382 if (class->usage_mask & (1 << bit)) {
1383 int len = depth;
1384
1385 len += printk("%*s %s", depth, "", usage_str[bit]);
1386 len += printk(KERN_CONT " at:\n");
1387 print_stack_trace(class->usage_traces + bit, len);
1388 }
1389 }
1390 printk("%*s }\n", depth, "");
1391
1392 printk("%*s ... key at: [<%p>] %pS\n",
1393 depth, "", class->key, class->key);
1394}
1395
/*
 * printk the shortest lock dependencies from @leaf to @root in reverse order:
 */
1399static void __used
1400print_shortest_lock_dependencies(struct lock_list *leaf,
1401 struct lock_list *root)
1402{
1403 struct lock_list *entry = leaf;
1404 int depth;
1405
1406
1407 depth = get_lock_depth(leaf);
1408
1409 do {
1410 print_lock_class_header(entry->class, depth);
1411 printk("%*s ... acquired at:\n", depth, "");
1412 print_stack_trace(&entry->trace, 2);
1413 printk("\n");
1414
1415 if (depth == 0 && (entry != root)) {
1416 printk("lockdep:%s bad path found in chain graph\n", __func__);
1417 break;
1418 }
1419
1420 entry = get_lock_parent(entry);
1421 depth--;
1422 } while (entry && (depth >= 0));
1423
1424 return;
1425}
1426
1427static void
1428print_irq_lock_scenario(struct lock_list *safe_entry,
1429 struct lock_list *unsafe_entry,
1430 struct lock_class *prev_class,
1431 struct lock_class *next_class)
1432{
1433 struct lock_class *safe_class = safe_entry->class;
1434 struct lock_class *unsafe_class = unsafe_entry->class;
1435 struct lock_class *middle_class = prev_class;
1436
1437 if (middle_class == safe_class)
1438 middle_class = next_class;
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453 if (middle_class != unsafe_class) {
1454 printk("Chain exists of:\n ");
1455 __print_lock_name(safe_class);
1456 printk(KERN_CONT " --> ");
1457 __print_lock_name(middle_class);
1458 printk(KERN_CONT " --> ");
1459 __print_lock_name(unsafe_class);
1460 printk(KERN_CONT "\n\n");
1461 }
1462
1463 printk(" Possible interrupt unsafe locking scenario:\n\n");
1464 printk(" CPU0 CPU1\n");
1465 printk(" ---- ----\n");
1466 printk(" lock(");
1467 __print_lock_name(unsafe_class);
1468 printk(KERN_CONT ");\n");
1469 printk(" local_irq_disable();\n");
1470 printk(" lock(");
1471 __print_lock_name(safe_class);
1472 printk(KERN_CONT ");\n");
1473 printk(" lock(");
1474 __print_lock_name(middle_class);
1475 printk(KERN_CONT ");\n");
1476 printk(" <Interrupt>\n");
1477 printk(" lock(");
1478 __print_lock_name(safe_class);
1479 printk(KERN_CONT ");\n");
1480 printk("\n *** DEADLOCK ***\n\n");
1481}
1482
1483static int
1484print_bad_irq_dependency(struct task_struct *curr,
1485 struct lock_list *prev_root,
1486 struct lock_list *next_root,
1487 struct lock_list *backwards_entry,
1488 struct lock_list *forwards_entry,
1489 struct held_lock *prev,
1490 struct held_lock *next,
1491 enum lock_usage_bit bit1,
1492 enum lock_usage_bit bit2,
1493 const char *irqclass)
1494{
1495 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1496 return 0;
1497
1498 pr_warn("\n");
1499 pr_warn("=====================================================\n");
1500 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
1501 irqclass, irqclass);
1502 print_kernel_ident();
1503 pr_warn("-----------------------------------------------------\n");
1504 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1505 curr->comm, task_pid_nr(curr),
1506 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1507 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1508 curr->hardirqs_enabled,
1509 curr->softirqs_enabled);
1510 print_lock(next);
1511
1512 pr_warn("\nand this task is already holding:\n");
1513 print_lock(prev);
1514 pr_warn("which would create a new lock dependency:\n");
1515 print_lock_name(hlock_class(prev));
1516 pr_cont(" ->");
1517 print_lock_name(hlock_class(next));
1518 pr_cont("\n");
1519
1520 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
1521 irqclass);
1522 print_lock_name(backwards_entry->class);
1523 pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
1524
1525 print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1526
1527 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
1528 print_lock_name(forwards_entry->class);
1529 pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
1530 pr_warn("...");
1531
1532 print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1533
1534 pr_warn("\nother info that might help us debug this:\n\n");
1535 print_irq_lock_scenario(backwards_entry, forwards_entry,
1536 hlock_class(prev), hlock_class(next));
1537
1538 lockdep_print_held_locks(curr);
1539
1540 pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
1541 if (!save_trace(&prev_root->trace))
1542 return 0;
1543 print_shortest_lock_dependencies(backwards_entry, prev_root);
1544
1545 pr_warn("\nthe dependencies between the lock to be acquired");
1546 pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
1547 if (!save_trace(&next_root->trace))
1548 return 0;
1549 print_shortest_lock_dependencies(forwards_entry, next_root);
1550
1551 pr_warn("\nstack backtrace:\n");
1552 dump_stack();
1553
1554 return 0;
1555}
1556
1557static int
1558check_usage(struct task_struct *curr, struct held_lock *prev,
1559 struct held_lock *next, enum lock_usage_bit bit_backwards,
1560 enum lock_usage_bit bit_forwards, const char *irqclass)
1561{
1562 int ret;
1563 struct lock_list this, that;
1564 struct lock_list *uninitialized_var(target_entry);
1565 struct lock_list *uninitialized_var(target_entry1);
1566
1567 this.parent = NULL;
1568
1569 this.class = hlock_class(prev);
1570 ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1571 if (ret < 0)
1572 return print_bfs_bug(ret);
1573 if (ret == 1)
1574 return ret;
1575
1576 that.parent = NULL;
1577 that.class = hlock_class(next);
1578 ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1579 if (ret < 0)
1580 return print_bfs_bug(ret);
1581 if (ret == 1)
1582 return ret;
1583
1584 return print_bad_irq_dependency(curr, &this, &that,
1585 target_entry, target_entry1,
1586 prev, next,
1587 bit_backwards, bit_forwards, irqclass);
1588}
1589
1590static const char *state_names[] = {
1591#define LOCKDEP_STATE(__STATE) \
1592 __stringify(__STATE),
1593#include "lockdep_states.h"
1594#undef LOCKDEP_STATE
1595};
1596
1597static const char *state_rnames[] = {
1598#define LOCKDEP_STATE(__STATE) \
1599 __stringify(__STATE)"-READ",
1600#include "lockdep_states.h"
1601#undef LOCKDEP_STATE
1602};
1603
1604static inline const char *state_name(enum lock_usage_bit bit)
1605{
1606 return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1607}
1608
1609static int exclusive_bit(int new_bit)
1610{
 /*
  * Usage bits are laid out as:
  *
  *   USED_IN, USED_IN_READ, ENABLED, ENABLED_READ
  *
  * bit 0 - write/read
  * bit 1 - used_in/enabled
  * bit 2+ - state
  */
1622 int state = new_bit & ~3;
1623 int dir = new_bit & 2;
1624
 /*
  * keep state, bit flip the direction and strip read.
  */
1628 return state | (dir ^ 2);
1629}
1630
1631static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1632 struct held_lock *next, enum lock_usage_bit bit)
1633{
 /*
  * Prove that the new dependency does not connect a hardirq-safe
  * lock with a hardirq-unsafe lock - to achieve this we search
  * the backwards-subgraph starting at <prev>, and the
  * forwards-subgraph starting at <next>:
  */
1640 if (!check_usage(curr, prev, next, bit,
1641 exclusive_bit(bit), state_name(bit)))
1642 return 0;
1643
1644 bit++;
1645
 /*
  * Prove that the new dependency does not connect a hardirq-safe-read
  * lock with a hardirq-unsafe lock - to achieve this we search
  * the backwards-subgraph starting at <prev>, and the
  * forwards-subgraph starting at <next>:
  */
1652 if (!check_usage(curr, prev, next, bit,
1653 exclusive_bit(bit), state_name(bit)))
1654 return 0;
1655
1656 return 1;
1657}
1658
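/*
 * Run the irq-safety checks for every lockdep state (hardirq, softirq, ...)
 * on the new <prev> -> <next> dependency:
 */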
1659static int
1660check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1661 struct held_lock *next)
1662{
1663#define LOCKDEP_STATE(__STATE) \
1664 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1665 return 0;
1666#include "lockdep_states.h"
1667#undef LOCKDEP_STATE
1668
1669 return 1;
1670}
1671
1672static void inc_chains(void)
1673{
1674 if (current->hardirq_context)
1675 nr_hardirq_chains++;
1676 else {
1677 if (current->softirq_context)
1678 nr_softirq_chains++;
1679 else
1680 nr_process_chains++;
1681 }
1682}
1683
1684#else
1685
1686static inline int
1687check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1688 struct held_lock *next)
1689{
1690 return 1;
1691}
1692
1693static inline void inc_chains(void)
1694{
1695 nr_process_chains++;
1696}
1697
1698#endif
1699
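/*
 * Print a scenario showing how taking the same lock class twice on one
 * CPU can deadlock:
 */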
1700static void
1701print_deadlock_scenario(struct held_lock *nxt,
1702 struct held_lock *prv)
1703{
1704 struct lock_class *next = hlock_class(nxt);
1705 struct lock_class *prev = hlock_class(prv);
1706
1707 printk(" Possible unsafe locking scenario:\n\n");
1708 printk(" CPU0\n");
1709 printk(" ----\n");
1710 printk(" lock(");
1711 __print_lock_name(prev);
1712 printk(KERN_CONT ");\n");
1713 printk(" lock(");
1714 __print_lock_name(next);
1715 printk(KERN_CONT ");\n");
1716 printk("\n *** DEADLOCK ***\n\n");
1717 printk(" May be due to missing lock nesting notation\n\n");
1718}
1719
1720static int
1721print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1722 struct held_lock *next)
1723{
1724 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1725 return 0;
1726
1727 pr_warn("\n");
1728 pr_warn("============================================\n");
1729 pr_warn("WARNING: possible recursive locking detected\n");
1730 print_kernel_ident();
1731 pr_warn("--------------------------------------------\n");
1732 pr_warn("%s/%d is trying to acquire lock:\n",
1733 curr->comm, task_pid_nr(curr));
1734 print_lock(next);
1735 pr_warn("\nbut task is already holding lock:\n");
1736 print_lock(prev);
1737
1738 pr_warn("\nother info that might help us debug this:\n");
1739 print_deadlock_scenario(next, prev);
1740 lockdep_print_held_locks(curr);
1741
1742 pr_warn("\nstack backtrace:\n");
1743 dump_stack();
1744
1745 return 0;
1746}
1747
/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
1756static int
1757check_deadlock(struct task_struct *curr, struct held_lock *next,
1758 struct lockdep_map *next_instance, int read)
1759{
1760 struct held_lock *prev;
1761 struct held_lock *nest = NULL;
1762 int i;
1763
1764 for (i = 0; i < curr->lockdep_depth; i++) {
1765 prev = curr->held_locks + i;
1766
1767 if (prev->instance == next->nest_lock)
1768 nest = prev;
1769
1770 if (hlock_class(prev) != hlock_class(next))
1771 continue;
1772
1773
1774
1775
1776
1777 if ((read == 2) && prev->read)
1778 return 2;
1779
1780
1781
1782
1783
1784 if (nest)
1785 return 2;
1786
1787 return print_deadlock_bug(curr, prev, next);
1788 }
1789 return 1;
1790}
1791
/*
 * There was a chain-cache miss, and we are about to add a new dependency
 * to a previous lock. We validate the following rules:
 *
 *  - would the adding of the <prev> -> <next> dependency create a
 *    circular dependency in the graph? [== circular deadlock]
 *
 *  - does the new dependency connect any hardirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with hardirq contexts]
 *
 *  - does the new dependency connect any softirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    softirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with softirq contexts]
 *
 * any of these scenarios could lead to a deadlock.
 *
 * Then if all the validations pass, we add the forwards and backwards
 * dependency.
 */
1814static int
1815check_prev_add(struct task_struct *curr, struct held_lock *prev,
1816 struct held_lock *next, int distance, int *stack_saved)
1817{
1818 struct lock_list *entry;
1819 int ret;
1820 struct lock_list this;
1821 struct lock_list *uninitialized_var(target_entry);
1822
 /*
  * Static variable, serialized by the graph_lock().
  *
  * We use this static variable to save the stack trace in case
  * we call into this function multiple times due to encountering
  * trylocks in the held lock stack.
  */
1829 static struct stack_trace trace;
1830
 /*
  * Prove that the new <prev> -> <next> dependency would not
  * create a circular dependency in the graph. (We do this by a
  * breadth-first search into the graph starting at <next>, and
  * check whether we can reach <prev>.)
  */
1840 this.class = hlock_class(next);
1841 this.parent = NULL;
1842 ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1843 if (unlikely(!ret))
1844 return print_circular_bug(&this, target_entry, next, prev);
1845 else if (unlikely(ret < 0))
1846 return print_bfs_bug(ret);
1847
1848 if (!check_prev_add_irq(curr, prev, next))
1849 return 0;
1850
 /*
  * For recursive read-locks we do all the dependency checks,
  * but we don't store read-triggered dependencies (only
  * write-triggered dependencies). This ensures that only the
  * write-side dependencies matter, and that if for example a
  * read-lock is taken on two locks in a different order, it
  * doesn't deadlock.
  */
1859 if (next->read == 2 || prev->read == 2)
1860 return 1;
1861
 /*
  * Is the <prev> -> <next> dependency already present?
  *
  * (this may occur even though this is a new chain: consider
  *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
  *  chains - the second one will be new, but L1 already has
  *  L2 added to its dependency list, due to the first chain.)
  */
1869 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1870 if (entry->class == hlock_class(next)) {
1871 if (distance == 1)
1872 entry->distance = 1;
1873 return 2;
1874 }
1875 }
1876
1877 if (!*stack_saved) {
1878 if (!save_trace(&trace))
1879 return 0;
1880 *stack_saved = 1;
1881 }
1882
1883
1884
1885
1886
1887 ret = add_lock_to_list(hlock_class(next),
1888 &hlock_class(prev)->locks_after,
1889 next->acquire_ip, distance, &trace);
1890
1891 if (!ret)
1892 return 0;
1893
1894 ret = add_lock_to_list(hlock_class(prev),
1895 &hlock_class(next)->locks_before,
1896 next->acquire_ip, distance, &trace);
1897 if (!ret)
1898 return 0;
1899
1900
1901
1902
1903 if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1904
1905 *stack_saved = 0;
1906 graph_unlock();
1907 printk("\n new dependency: ");
1908 print_lock_name(hlock_class(prev));
1909 printk(KERN_CONT " => ");
1910 print_lock_name(hlock_class(next));
1911 printk(KERN_CONT "\n");
1912 dump_stack();
1913 return graph_lock();
1914 }
1915 return 1;
1916}
1917
/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
1924static int
1925check_prevs_add(struct task_struct *curr, struct held_lock *next)
1926{
1927 int depth = curr->lockdep_depth;
1928 int stack_saved = 0;
1929 struct held_lock *hlock;
1930
1931
1932
1933
1934
1935
1936 if (!depth)
1937 goto out_bug;
1938
1939
1940
1941
1942 if (curr->held_locks[depth].irq_context !=
1943 curr->held_locks[depth-1].irq_context)
1944 goto out_bug;
1945
1946 for (;;) {
1947 int distance = curr->lockdep_depth - depth + 1;
1948 hlock = curr->held_locks + depth - 1;
1949
1950
1951
1952
1953 if (hlock->read != 2 && hlock->check) {
1954 if (!check_prev_add(curr, hlock, next,
1955 distance, &stack_saved))
1956 return 0;
1957
1958
1959
1960
1961
1962
1963 if (!hlock->trylock)
1964 break;
1965 }
1966 depth--;
1967
1968
1969
1970 if (!depth)
1971 break;
1972
1973
1974
1975 if (curr->held_locks[depth].irq_context !=
1976 curr->held_locks[depth-1].irq_context)
1977 break;
1978 }
1979 return 1;
1980out_bug:
1981 if (!debug_locks_off_graph_unlock())
1982 return 0;
1983
1984
1985
1986
1987
1988
1989 WARN_ON(1);
1990
1991 return 0;
1992}
1993
1994unsigned long nr_lock_chains;
1995struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1996int nr_chain_hlocks;
1997static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1998
1999struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
2000{
2001 return lock_classes + chain_hlocks[chain->base + i];
2002}
2003
2004
2005
2006
2007static inline int get_first_held_lock(struct task_struct *curr,
2008 struct held_lock *hlock)
2009{
2010 int i;
2011 struct held_lock *hlock_curr;
2012
2013 for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2014 hlock_curr = curr->held_locks + i;
2015 if (hlock_curr->irq_context != hlock->irq_context)
2016 break;
2017
2018 }
2019
2020 return ++i;
2021}
2022
2023#ifdef CONFIG_DEBUG_LOCKDEP
2024
2025
2026
2027static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2028{
2029 u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2030
2031 printk(" class_idx:%d -> chain_key:%016Lx",
2032 class_idx,
2033 (unsigned long long)new_chain_key);
2034 return new_chain_key;
2035}
2036
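/*
 * Print the chain_key as it is built up from the currently held locks:
 */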
2037static void
2038print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2039{
2040 struct held_lock *hlock;
2041 u64 chain_key = 0;
2042 int depth = curr->lockdep_depth;
2043 int i;
2044
2045 printk("depth: %u\n", depth + 1);
2046 for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
2047 hlock = curr->held_locks + i;
2048 chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2049
2050 print_lock(hlock);
2051 }
2052
2053 print_chain_key_iteration(hlock_next->class_idx, chain_key);
2054 print_lock(hlock_next);
2055}
2056
2057static void print_chain_keys_chain(struct lock_chain *chain)
2058{
2059 int i;
2060 u64 chain_key = 0;
2061 int class_id;
2062
2063 printk("depth: %u\n", chain->depth);
2064 for (i = 0; i < chain->depth; i++) {
2065 class_id = chain_hlocks[chain->base + i];
2066 chain_key = print_chain_key_iteration(class_id + 1, chain_key);
2067
2068 print_lock_name(lock_classes + class_id);
2069 printk("\n");
2070 }
2071}
2072
2073static void print_collision(struct task_struct *curr,
2074 struct held_lock *hlock_next,
2075 struct lock_chain *chain)
2076{
2077 pr_warn("\n");
2078 pr_warn("============================\n");
2079 pr_warn("WARNING: chain_key collision\n");
2080 print_kernel_ident();
2081 pr_warn("----------------------------\n");
2082 pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
2083 pr_warn("Hash chain already cached but the contents don't match!\n");
2084
2085 pr_warn("Held locks:");
2086 print_chain_keys_held_locks(curr, hlock_next);
2087
2088 pr_warn("Locks in cached chain:");
2089 print_chain_keys_chain(chain);
2090
2091 pr_warn("\nstack backtrace:\n");
2092 dump_stack();
2093}
2094#endif
2095
/*
 * Checks whether the chain and the current held locks are consistent
 * in depth and also in content. If they are not it most likely means
 * that there was a collision during the calculation of the chain_key.
 * Returns: 0 not passed, 1 passed
 */
2102static int check_no_collision(struct task_struct *curr,
2103 struct held_lock *hlock,
2104 struct lock_chain *chain)
2105{
2106#ifdef CONFIG_DEBUG_LOCKDEP
2107 int i, j, id;
2108
2109 i = get_first_held_lock(curr, hlock);
2110
2111 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2112 print_collision(curr, hlock, chain);
2113 return 0;
2114 }
2115
2116 for (j = 0; j < chain->depth - 1; j++, i++) {
2117 id = curr->held_locks[i].class_idx - 1;
2118
2119 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2120 print_collision(curr, hlock, chain);
2121 return 0;
2122 }
2123 }
2124#endif
2125 return 1;
2126}
2127
/*
 * Look up a dependency chain. If the key is not present yet then
 * add it and return 1 - in this case the new dependency chain is
 * validated. If the key is already hashed, return 0.
 * (On return with 1 graph_lock is held.)
 */
2134static inline int lookup_chain_cache(struct task_struct *curr,
2135 struct held_lock *hlock,
2136 u64 chain_key)
2137{
2138 struct lock_class *class = hlock_class(hlock);
2139 struct hlist_head *hash_head = chainhashentry(chain_key);
2140 struct lock_chain *chain;
2141 int i, j;
2142
2143
2144
2145
2146
2147
2148 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2149 return 0;
2150
2151
2152
2153
2154 hlist_for_each_entry_rcu(chain, hash_head, entry) {
2155 if (chain->chain_key == chain_key) {
2156cache_hit:
2157 debug_atomic_inc(chain_lookup_hits);
2158 if (!check_no_collision(curr, hlock, chain))
2159 return 0;
2160
2161 if (very_verbose(class))
2162 printk("\nhash chain already cached, key: "
2163 "%016Lx tail class: [%p] %s\n",
2164 (unsigned long long)chain_key,
2165 class->key, class->name);
2166 return 0;
2167 }
2168 }
2169 if (very_verbose(class))
2170 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
2171 (unsigned long long)chain_key, class->key, class->name);
2172
2173
2174
2175
2176 if (!graph_lock())
2177 return 0;
2178
2179
2180
2181 hlist_for_each_entry(chain, hash_head, entry) {
2182 if (chain->chain_key == chain_key) {
2183 graph_unlock();
2184 goto cache_hit;
2185 }
2186 }
2187 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2188 if (!debug_locks_off_graph_unlock())
2189 return 0;
2190
2191 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2192 dump_stack();
2193 return 0;
2194 }
2195 chain = lock_chains + nr_lock_chains++;
2196 chain->chain_key = chain_key;
2197 chain->irq_context = hlock->irq_context;
2198 i = get_first_held_lock(curr, hlock);
2199 chain->depth = curr->lockdep_depth + 1 - i;
2200
2201 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2202 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
2203 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2204
2205 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2206 chain->base = nr_chain_hlocks;
2207 for (j = 0; j < chain->depth - 1; j++, i++) {
2208 int lock_id = curr->held_locks[i].class_idx - 1;
2209 chain_hlocks[chain->base + j] = lock_id;
2210 }
2211 chain_hlocks[chain->base + j] = class - lock_classes;
2212 }
2213
2214 if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
2215 nr_chain_hlocks += chain->depth;
2216
2217#ifdef CONFIG_DEBUG_LOCKDEP
2218
2219
2220
2221 if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
2222 if (!debug_locks_off_graph_unlock())
2223 return 0;
2224
2225 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2226 dump_stack();
2227 return 0;
2228 }
2229#endif
2230
2231 hlist_add_head_rcu(&chain->entry, hash_head);
2232 debug_atomic_inc(chain_lookup_misses);
2233 inc_chains();
2234
2235 return 1;
2236}
2237
2238static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2239 struct held_lock *hlock, int chain_head, u64 chain_key)
2240{
 /*
  * Trylock needs to maintain the stack of held locks, but it
  * does not add new dependencies, because trylock can be done
  * in any order.
  *
  * We look up the chain_key and do the O(N^2) check and update of
  * the dependencies only if this is a new dependency chain.
  * (If lookup_chain_cache() returns with 1 it acquires
  * graph_lock for us.)
  */
2251 if (!hlock->trylock && hlock->check &&
2252 lookup_chain_cache(curr, hlock, chain_key)) {
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265 int ret = check_deadlock(curr, hlock, lock, hlock->read);
2266
2267 if (!ret)
2268 return 0;
2269
2270
2271
2272
2273
2274 if (ret == 2)
2275 hlock->read = 2;
2276
2277
2278
2279
2280 if (!chain_head && ret != 2)
2281 if (!check_prevs_add(curr, hlock))
2282 return 0;
2283 graph_unlock();
2284 } else
2285
2286 if (unlikely(!debug_locks))
2287 return 0;
2288
2289 return 1;
2290}
2291#else
2292static inline int validate_chain(struct task_struct *curr,
2293 struct lockdep_map *lock, struct held_lock *hlock,
2294 int chain_head, u64 chain_key)
2295{
2296 return 1;
2297}
2298#endif
2299
/*
 * We are building curr_chain_key incrementally, so double-check
 * it from scratch, to make sure that it's done correctly:
 */
2304static void check_chain_key(struct task_struct *curr)
2305{
2306#ifdef CONFIG_DEBUG_LOCKDEP
2307 struct held_lock *hlock, *prev_hlock = NULL;
2308 unsigned int i;
2309 u64 chain_key = 0;
2310
2311 for (i = 0; i < curr->lockdep_depth; i++) {
2312 hlock = curr->held_locks + i;
2313 if (chain_key != hlock->prev_chain_key) {
2314 debug_locks_off();
2315
2316
2317
2318
2319 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2320 curr->lockdep_depth, i,
2321 (unsigned long long)chain_key,
2322 (unsigned long long)hlock->prev_chain_key);
2323 return;
2324 }
2325
2326
2327
2328 if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
2329 return;
2330
2331 if (prev_hlock && (prev_hlock->irq_context !=
2332 hlock->irq_context))
2333 chain_key = 0;
2334 chain_key = iterate_chain_key(chain_key, hlock->class_idx);
2335 prev_hlock = hlock;
2336 }
2337 if (chain_key != curr->curr_chain_key) {
2338 debug_locks_off();
2339
2340
2341
2342
2343 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2344 curr->lockdep_depth, i,
2345 (unsigned long long)chain_key,
2346 (unsigned long long)curr->curr_chain_key);
2347 }
2348#endif
2349}
2350
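/*
 * Print a scenario showing how an interrupt taking the same lock on the
 * same CPU can deadlock:
 */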
2351static void
2352print_usage_bug_scenario(struct held_lock *lock)
2353{
2354 struct lock_class *class = hlock_class(lock);
2355
2356 printk(" Possible unsafe locking scenario:\n\n");
2357 printk(" CPU0\n");
2358 printk(" ----\n");
2359 printk(" lock(");
2360 __print_lock_name(class);
2361 printk(KERN_CONT ");\n");
2362 printk(" <Interrupt>\n");
2363 printk(" lock(");
2364 __print_lock_name(class);
2365 printk(KERN_CONT ");\n");
2366 printk("\n *** DEADLOCK ***\n\n");
2367}
2368
2369static int
2370print_usage_bug(struct task_struct *curr, struct held_lock *this,
2371 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2372{
2373 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2374 return 0;
2375
2376 pr_warn("\n");
2377 pr_warn("================================\n");
2378 pr_warn("WARNING: inconsistent lock state\n");
2379 print_kernel_ident();
2380 pr_warn("--------------------------------\n");
2381
2382 pr_warn("inconsistent {%s} -> {%s} usage.\n",
2383 usage_str[prev_bit], usage_str[new_bit]);
2384
2385 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2386 curr->comm, task_pid_nr(curr),
2387 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2388 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2389 trace_hardirqs_enabled(curr),
2390 trace_softirqs_enabled(curr));
2391 print_lock(this);
2392
2393 pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
2394 print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2395
2396 print_irqtrace_events(curr);
2397 pr_warn("\nother info that might help us debug this:\n");
2398 print_usage_bug_scenario(this);
2399
2400 lockdep_print_held_locks(curr);
2401
2402 pr_warn("\nstack backtrace:\n");
2403 dump_stack();
2404
2405 return 0;
2406}
2407
/*
 * Print out an error if an invalid bit is set:
 */
2411static inline int
2412valid_state(struct task_struct *curr, struct held_lock *this,
2413 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2414{
2415 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2416 return print_usage_bug(curr, this, bad_bit, new_bit);
2417 return 1;
2418}
2419
2420static int mark_lock(struct task_struct *curr, struct held_lock *this,
2421 enum lock_usage_bit new_bit);
2422
2423#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2424
2425
2426
2427
2428static int
2429print_irq_inversion_bug(struct task_struct *curr,
2430 struct lock_list *root, struct lock_list *other,
2431 struct held_lock *this, int forwards,
2432 const char *irqclass)
2433{
2434 struct lock_list *entry = other;
2435 struct lock_list *middle = NULL;
2436 int depth;
2437
2438 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2439 return 0;
2440
2441 pr_warn("\n");
2442 pr_warn("========================================================\n");
2443 pr_warn("WARNING: possible irq lock inversion dependency detected\n");
2444 print_kernel_ident();
2445 pr_warn("--------------------------------------------------------\n");
2446 pr_warn("%s/%d just changed the state of lock:\n",
2447 curr->comm, task_pid_nr(curr));
2448 print_lock(this);
2449 if (forwards)
2450 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2451 else
2452 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2453 print_lock_name(other->class);
2454 pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2455
2456 pr_warn("\nother info that might help us debug this:\n");
2457
2458
2459 depth = get_lock_depth(other);
2460 do {
2461 if (depth == 0 && (entry != root)) {
2462 pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
2463 break;
2464 }
2465 middle = entry;
2466 entry = get_lock_parent(entry);
2467 depth--;
2468 } while (entry && entry != root && (depth >= 0));
2469 if (forwards)
2470 print_irq_lock_scenario(root, other,
2471 middle ? middle->class : root->class, other->class);
2472 else
2473 print_irq_lock_scenario(other, root,
2474 middle ? middle->class : other->class, root->class);
2475
2476 lockdep_print_held_locks(curr);
2477
2478 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2479 if (!save_trace(&root->trace))
2480 return 0;
2481 print_shortest_lock_dependencies(other, root);
2482
2483 pr_warn("\nstack backtrace:\n");
2484 dump_stack();
2485
2486 return 0;
2487}
2488
/*
 * Prove that in the forwards-direction subgraph starting at <this>
 * there is no lock matching <bit>:
 */
2493static int
2494check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2495 enum lock_usage_bit bit, const char *irqclass)
2496{
2497 int ret;
2498 struct lock_list root;
2499 struct lock_list *uninitialized_var(target_entry);
2500
2501 root.parent = NULL;
2502 root.class = hlock_class(this);
2503 ret = find_usage_forwards(&root, bit, &target_entry);
2504 if (ret < 0)
2505 return print_bfs_bug(ret);
2506 if (ret == 1)
2507 return ret;
2508
2509 return print_irq_inversion_bug(curr, &root, target_entry,
2510 this, 1, irqclass);
2511}
2512
/*
 * Prove that in the backwards-direction subgraph starting at <this>
 * there is no lock matching <bit>:
 */
2517static int
2518check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2519 enum lock_usage_bit bit, const char *irqclass)
2520{
2521 int ret;
2522 struct lock_list root;
2523 struct lock_list *uninitialized_var(target_entry);
2524
2525 root.parent = NULL;
2526 root.class = hlock_class(this);
2527 ret = find_usage_backwards(&root, bit, &target_entry);
2528 if (ret < 0)
2529 return print_bfs_bug(ret);
2530 if (ret == 1)
2531 return ret;
2532
2533 return print_irq_inversion_bug(curr, &root, target_entry,
2534 this, 0, irqclass);
2535}
2536
2537void print_irqtrace_events(struct task_struct *curr)
2538{
2539 printk("irq event stamp: %u\n", curr->irq_events);
2540 printk("hardirqs last enabled at (%u): [<%p>] %pS\n",
2541 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
2542 (void *)curr->hardirq_enable_ip);
2543 printk("hardirqs last disabled at (%u): [<%p>] %pS\n",
2544 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
2545 (void *)curr->hardirq_disable_ip);
2546 printk("softirqs last enabled at (%u): [<%p>] %pS\n",
2547 curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
2548 (void *)curr->softirq_enable_ip);
2549 printk("softirqs last disabled at (%u): [<%p>] %pS\n",
2550 curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
2551 (void *)curr->softirq_disable_ip);
2552}
2553
2554static int HARDIRQ_verbose(struct lock_class *class)
2555{
2556#if HARDIRQ_VERBOSE
2557 return class_filter(class);
2558#endif
2559 return 0;
2560}
2561
2562static int SOFTIRQ_verbose(struct lock_class *class)
2563{
2564#if SOFTIRQ_VERBOSE
2565 return class_filter(class);
2566#endif
2567 return 0;
2568}
2569
2570static int RECLAIM_FS_verbose(struct lock_class *class)
2571{
2572#if RECLAIM_VERBOSE
2573 return class_filter(class);
2574#endif
2575 return 0;
2576}
2577
2578#define STRICT_READ_CHECKS 1
2579
2580static int (*state_verbose_f[])(struct lock_class *class) = {
2581#define LOCKDEP_STATE(__STATE) \
2582 __STATE##_verbose,
2583#include "lockdep_states.h"
2584#undef LOCKDEP_STATE
2585};
2586
2587static inline int state_verbose(enum lock_usage_bit bit,
2588 struct lock_class *class)
2589{
2590 return state_verbose_f[bit >> 2](class);
2591}
2592
2593typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2594 enum lock_usage_bit bit, const char *name);
2595
2596static int
2597mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2598 enum lock_usage_bit new_bit)
2599{
2600 int excl_bit = exclusive_bit(new_bit);
2601 int read = new_bit & 1;
2602 int dir = new_bit & 2;
2603
	/*
	 * mark USED_IN has to look forwards -- to ensure no dependency
	 * has ENABLED state, which would allow recursion deadlocks.
	 *
	 * mark ENABLED has to look backwards -- to ensure no dependee
	 * has USED_IN state, which, again, would allow recursion deadlocks.
	 */
2611 check_usage_f usage = dir ?
2612 check_usage_backwards : check_usage_forwards;
2613
	/*
	 * Validate that this particular lock does not have conflicting
	 * usage states:
	 */
2618 if (!valid_state(curr, this, new_bit, excl_bit))
2619 return 0;
2620
	/*
	 * Validate that the lock dependencies don't have conflicting usage
	 * states:
	 */
2625 if ((!read || !dir || STRICT_READ_CHECKS) &&
2626 !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2627 return 0;
2628
	/*
	 * Check for read in write conflicts:
	 */
2632 if (!read) {
2633 if (!valid_state(curr, this, new_bit, excl_bit + 1))
2634 return 0;
2635
2636 if (STRICT_READ_CHECKS &&
2637 !usage(curr, this, excl_bit + 1,
2638 state_name(new_bit + 1)))
2639 return 0;
2640 }
2641
2642 if (state_verbose(new_bit, hlock_class(this)))
2643 return 2;
2644
2645 return 1;
2646}
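
/*
 * Illustrative note (added for clarity, not part of the original file):
 * the usage bits decoded by mark_lock_irq() above are laid out per state as
 *
 *	bit 0:	the _READ variant		(new_bit & 1)
 *	bit 1:	ENABLED vs. USED_IN		(new_bit & 2, the "dir")
 *	bit 2+:	index of the irq state		(new_bit >> 2)
 *
 * so, assuming HARDIRQ is the first entry in lockdep_states.h:
 *
 *	LOCK_USED_IN_HARDIRQ      = 0
 *	LOCK_USED_IN_HARDIRQ_READ = 1
 *	LOCK_ENABLED_HARDIRQ      = 2
 *	LOCK_ENABLED_HARDIRQ_READ = 3
 *
 * which is exactly the "2 + (mark << 2)" (+1 for readers) arithmetic used
 * by mark_held_locks() below.
 */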
2647
2648enum mark_type {
2649#define LOCKDEP_STATE(__STATE) __STATE,
2650#include "lockdep_states.h"
2651#undef LOCKDEP_STATE
2652};
2653
/*
 * Mark all currently held locks with a usage bit:
 */
2657static int
2658mark_held_locks(struct task_struct *curr, enum mark_type mark)
2659{
2660 enum lock_usage_bit usage_bit;
2661 struct held_lock *hlock;
2662 int i;
2663
2664 for (i = 0; i < curr->lockdep_depth; i++) {
2665 hlock = curr->held_locks + i;
2666
2667 usage_bit = 2 + (mark << 2);
2668 if (hlock->read)
2669 usage_bit += 1;
2670
2671 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2672
2673 if (!hlock->check)
2674 continue;
2675
2676 if (!mark_lock(curr, hlock, usage_bit))
2677 return 0;
2678 }
2679
2680 return 1;
2681}
2682
/*
 * Hardirqs will be enabled:
 */
2686static void __trace_hardirqs_on_caller(unsigned long ip)
2687{
2688 struct task_struct *curr = current;
2689
2690
2691 curr->hardirqs_enabled = 1;
2692
	/*
	 * We are going to turn hardirqs on, so set the
	 * usage bit for all held locks:
	 */
2697 if (!mark_held_locks(curr, HARDIRQ))
2698 return;
2699
	/*
	 * If we have softirqs enabled, then set the usage
	 * bit for all held locks. (disabled hardirqs prevented
	 * this bit from being set before)
	 */
2704 if (curr->softirqs_enabled)
2705 if (!mark_held_locks(curr, SOFTIRQ))
2706 return;
2707
2708 curr->hardirq_enable_ip = ip;
2709 curr->hardirq_enable_event = ++curr->irq_events;
2710 debug_atomic_inc(hardirqs_on_events);
2711}
2712
2713__visible void trace_hardirqs_on_caller(unsigned long ip)
2714{
2715 time_hardirqs_on(CALLER_ADDR0, ip);
2716
2717 if (unlikely(!debug_locks || current->lockdep_recursion))
2718 return;
2719
2720 if (unlikely(current->hardirqs_enabled)) {
		/*
		 * Neither irq nor preemption are disabled here
		 * so this is racy by nature but losing one hit
		 * in a stat is not a big deal.
		 */
2726 __debug_atomic_inc(redundant_hardirqs_on);
2727 return;
2728 }
2729
	/*
	 * We're enabling irqs and according to our state above irqs weren't
	 * already enabled, yet we find the hardware thinks they are in fact
	 * enabled.. someone messed up their IRQ state tracing.
	 */
2735 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2736 return;
2737
2738
2739
2740
2741 if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2742 return;
2743
	/*
	 * We can't allow enabling interrupts while in an interrupt
	 * handler, that's general bad form and such. Recursion,
	 * limited stack etc..
	 */
2748 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2749 return;
2750
2751 current->lockdep_recursion = 1;
2752 __trace_hardirqs_on_caller(ip);
2753 current->lockdep_recursion = 0;
2754}
2755EXPORT_SYMBOL(trace_hardirqs_on_caller);
2756
2757void trace_hardirqs_on(void)
2758{
2759 trace_hardirqs_on_caller(CALLER_ADDR0);
2760}
2761EXPORT_SYMBOL(trace_hardirqs_on);
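
/*
 * Usage sketch (an assumption about the irqflags wrappers, not defined in
 * this file): with CONFIG_TRACE_IRQFLAGS the generic helpers in
 * <linux/irqflags.h> are expected to call the two hooks above roughly as
 *
 *	#define local_irq_enable() \
 *		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
 *	#define local_irq_disable() \
 *		do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
 *
 * i.e. "on" is announced before interrupts are really enabled and "off"
 * only after they are really disabled, so lockdep's view never lags behind
 * the hardware state.
 */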
2762
/*
 * Hardirqs were disabled:
 */
2766__visible void trace_hardirqs_off_caller(unsigned long ip)
2767{
2768 struct task_struct *curr = current;
2769
2770 time_hardirqs_off(CALLER_ADDR0, ip);
2771
2772 if (unlikely(!debug_locks || current->lockdep_recursion))
2773 return;
2774
	/*
	 * We're supposed to get called after local IRQs have been masked,
	 * yet the hardware says they are still enabled: someone messed up
	 * their IRQ state tracing.
	 */
2779 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2780 return;
2781
2782 if (curr->hardirqs_enabled) {
2783
2784
2785
2786 curr->hardirqs_enabled = 0;
2787 curr->hardirq_disable_ip = ip;
2788 curr->hardirq_disable_event = ++curr->irq_events;
2789 debug_atomic_inc(hardirqs_off_events);
2790 } else
2791 debug_atomic_inc(redundant_hardirqs_off);
2792}
2793EXPORT_SYMBOL(trace_hardirqs_off_caller);
2794
2795void trace_hardirqs_off(void)
2796{
2797 trace_hardirqs_off_caller(CALLER_ADDR0);
2798}
2799EXPORT_SYMBOL(trace_hardirqs_off);
2800
/*
 * Softirqs will be enabled:
 */
2804void trace_softirqs_on(unsigned long ip)
2805{
2806 struct task_struct *curr = current;
2807
2808 if (unlikely(!debug_locks || current->lockdep_recursion))
2809 return;
2810
2811
2812
2813
2814
2815 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2816 return;
2817
2818 if (curr->softirqs_enabled) {
2819 debug_atomic_inc(redundant_softirqs_on);
2820 return;
2821 }
2822
2823 current->lockdep_recursion = 1;
2824
2825
2826
2827 curr->softirqs_enabled = 1;
2828 curr->softirq_enable_ip = ip;
2829 curr->softirq_enable_event = ++curr->irq_events;
2830 debug_atomic_inc(softirqs_on_events);
2831
	/*
	 * We are going to turn softirqs on, so set the
	 * usage bit for all held locks, if hardirqs are
	 * enabled too:
	 */
2836 if (curr->hardirqs_enabled)
2837 mark_held_locks(curr, SOFTIRQ);
2838 current->lockdep_recursion = 0;
2839}
2840
/*
 * Softirqs were disabled:
 */
2844void trace_softirqs_off(unsigned long ip)
2845{
2846 struct task_struct *curr = current;
2847
2848 if (unlikely(!debug_locks || current->lockdep_recursion))
2849 return;
2850
2851
2852
2853
2854 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2855 return;
2856
2857 if (curr->softirqs_enabled) {
2858
2859
2860
2861 curr->softirqs_enabled = 0;
2862 curr->softirq_disable_ip = ip;
2863 curr->softirq_disable_event = ++curr->irq_events;
2864 debug_atomic_inc(softirqs_off_events);
2865
2866
2867
2868 DEBUG_LOCKS_WARN_ON(!softirq_count());
2869 } else
2870 debug_atomic_inc(redundant_softirqs_off);
2871}
2872
2873static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2874{
2875 struct task_struct *curr = current;
2876
2877 if (unlikely(!debug_locks))
2878 return;
2879
2880 gfp_mask = current_gfp_context(gfp_mask);
2881
	/* no reclaim without waiting on it */
2883 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
2884 return;
2885
	/* this task won't enter reclaim */
2887 if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2888 return;
2889
	/* we're only interested in __GFP_FS allocations for now */
2891 if (!(gfp_mask & __GFP_FS) || (curr->flags & PF_MEMALLOC_NOFS))
2892 return;
2893
	/*
	 * Oi! Can't be having __GFP_FS allocations with IRQs disabled.
	 */
2897 if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
2898 return;
2899
	/* Disable lockdep if explicitly requested */
2901 if (gfp_mask & __GFP_NOLOCKDEP)
2902 return;
2903
2904 mark_held_locks(curr, RECLAIM_FS);
2905}
2906
2907static void check_flags(unsigned long flags);
2908
2909void lockdep_trace_alloc(gfp_t gfp_mask)
2910{
2911 unsigned long flags;
2912
2913 if (unlikely(current->lockdep_recursion))
2914 return;
2915
2916 raw_local_irq_save(flags);
2917 check_flags(flags);
2918 current->lockdep_recursion = 1;
2919 __lockdep_trace_alloc(gfp_mask, flags);
2920 current->lockdep_recursion = 0;
2921 raw_local_irq_restore(flags);
2922}
2923
2924static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2925{
	/*
	 * If non-trylock use in a hardirq or softirq context, then
	 * mark the lock as used in these contexts:
	 */
2930 if (!hlock->trylock) {
2931 if (hlock->read) {
2932 if (curr->hardirq_context)
2933 if (!mark_lock(curr, hlock,
2934 LOCK_USED_IN_HARDIRQ_READ))
2935 return 0;
2936 if (curr->softirq_context)
2937 if (!mark_lock(curr, hlock,
2938 LOCK_USED_IN_SOFTIRQ_READ))
2939 return 0;
2940 } else {
2941 if (curr->hardirq_context)
2942 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2943 return 0;
2944 if (curr->softirq_context)
2945 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2946 return 0;
2947 }
2948 }
2949 if (!hlock->hardirqs_off) {
2950 if (hlock->read) {
2951 if (!mark_lock(curr, hlock,
2952 LOCK_ENABLED_HARDIRQ_READ))
2953 return 0;
2954 if (curr->softirqs_enabled)
2955 if (!mark_lock(curr, hlock,
2956 LOCK_ENABLED_SOFTIRQ_READ))
2957 return 0;
2958 } else {
2959 if (!mark_lock(curr, hlock,
2960 LOCK_ENABLED_HARDIRQ))
2961 return 0;
2962 if (curr->softirqs_enabled)
2963 if (!mark_lock(curr, hlock,
2964 LOCK_ENABLED_SOFTIRQ))
2965 return 0;
2966 }
2967 }
2968
	/*
	 * If the current task is inside __GFP_FS memory reclaim (see
	 * lockdep_set_current_reclaim_state()), mark non-trylock
	 * acquisitions as used in RECLAIM_FS context:
	 */
2975 if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2976 if (hlock->read) {
2977 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2978 return 0;
2979 } else {
2980 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2981 return 0;
2982 }
2983 }
2984
2985 return 1;
2986}
2987
2988static inline unsigned int task_irq_context(struct task_struct *task)
2989{
2990 return 2 * !!task->hardirq_context + !!task->softirq_context;
2991}
2992
2993static int separate_irq_context(struct task_struct *curr,
2994 struct held_lock *hlock)
2995{
2996 unsigned int depth = curr->lockdep_depth;
2997
	/*
	 * Keep track of points where we cross into an interrupt context:
	 */
3001 if (depth) {
3002 struct held_lock *prev_hlock;
3003
3004 prev_hlock = curr->held_locks + depth-1;
3005
		/*
		 * If we cross into another context, reset the
		 * hash key (this also prevents the checking and the
		 * adding of the dependency to 'prev'):
		 */
3010 if (prev_hlock->irq_context != hlock->irq_context)
3011 return 1;
3012 }
3013 return 0;
3014}
3015
3016#else
3017
3018static inline
3019int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
3020 enum lock_usage_bit new_bit)
3021{
3022 WARN_ON(1);
3023 return 1;
3024}
3025
3026static inline int mark_irqflags(struct task_struct *curr,
3027 struct held_lock *hlock)
3028{
3029 return 1;
3030}
3031
3032static inline unsigned int task_irq_context(struct task_struct *task)
3033{
3034 return 0;
3035}
3036
3037static inline int separate_irq_context(struct task_struct *curr,
3038 struct held_lock *hlock)
3039{
3040 return 0;
3041}
3042
3043void lockdep_trace_alloc(gfp_t gfp_mask)
3044{
3045}
3046
3047#endif
3048
/*
 * Mark a lock with a usage bit, and validate the state transition:
 */
3052static int mark_lock(struct task_struct *curr, struct held_lock *this,
3053 enum lock_usage_bit new_bit)
3054{
3055 unsigned int new_mask = 1 << new_bit, ret = 1;
3056
	/*
	 * If already set then do not dirty the cacheline,
	 * nor do any checks:
	 */
3061 if (likely(hlock_class(this)->usage_mask & new_mask))
3062 return 1;
3063
3064 if (!graph_lock())
3065 return 0;
3066
	/*
	 * Make sure we didn't race while taking the graph lock:
	 */
3069 if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3070 graph_unlock();
3071 return 1;
3072 }
3073
3074 hlock_class(this)->usage_mask |= new_mask;
3075
3076 if (!save_trace(hlock_class(this)->usage_traces + new_bit))
3077 return 0;
3078
3079 switch (new_bit) {
3080#define LOCKDEP_STATE(__STATE) \
3081 case LOCK_USED_IN_##__STATE: \
3082 case LOCK_USED_IN_##__STATE##_READ: \
3083 case LOCK_ENABLED_##__STATE: \
3084 case LOCK_ENABLED_##__STATE##_READ:
3085#include "lockdep_states.h"
3086#undef LOCKDEP_STATE
3087 ret = mark_lock_irq(curr, this, new_bit);
3088 if (!ret)
3089 return 0;
3090 break;
3091 case LOCK_USED:
3092 debug_atomic_dec(nr_unused_locks);
3093 break;
3094 default:
3095 if (!debug_locks_off_graph_unlock())
3096 return 0;
3097 WARN_ON(1);
3098 return 0;
3099 }
3100
3101 graph_unlock();
3102
	/*
	 * We must printk outside of the graph_lock:
	 */
3106 if (ret == 2) {
3107 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3108 print_lock(this);
3109 print_irqtrace_events(curr);
3110 dump_stack();
3111 }
3112
3113 return ret;
3114}
3115
/*
 * Initialize a lock instance's lock-class mapping info:
 */
3119void lockdep_init_map(struct lockdep_map *lock, const char *name,
3120 struct lock_class_key *key, int subclass)
3121{
3122 int i;
3123
3124 kmemcheck_mark_initialized(lock, sizeof(*lock));
3125
3126 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3127 lock->class_cache[i] = NULL;
3128
3129#ifdef CONFIG_LOCK_STAT
3130 lock->cpu = raw_smp_processor_id();
3131#endif
3132
	/*
	 * Every lockdep map needs a name:
	 */
3136 if (DEBUG_LOCKS_WARN_ON(!name)) {
3137 lock->name = "NULL";
3138 return;
3139 }
3140
3141 lock->name = name;
3142
3143
3144
3145
3146 if (DEBUG_LOCKS_WARN_ON(!key))
3147 return;
3148
	/*
	 * Sanity check, the lock-class key must be persistent (static):
	 */
3151 if (!static_obj(key)) {
3152 printk("BUG: key %p not in .data!\n", key);
3153
3154
3155
3156 DEBUG_LOCKS_WARN_ON(1);
3157 return;
3158 }
3159 lock->key = key;
3160
3161 if (unlikely(!debug_locks))
3162 return;
3163
3164 if (subclass) {
3165 unsigned long flags;
3166
3167 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3168 return;
3169
3170 raw_local_irq_save(flags);
3171 current->lockdep_recursion = 1;
3172 register_lock_class(lock, subclass, 1);
3173 current->lockdep_recursion = 0;
3174 raw_local_irq_restore(flags);
3175 }
3176}
3177EXPORT_SYMBOL_GPL(lockdep_init_map);
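
/*
 * Usage sketch (illustrative, not part of this file): lock implementations
 * typically call lockdep_init_map() from their init helper with a static
 * key supplied by the init macro, e.g. roughly what mutex_init() does:
 *
 *	#define mutex_init(mutex)					\
 *	do {								\
 *		static struct lock_class_key __key;			\
 *									\
 *		__mutex_init((mutex), #mutex, &__key);			\
 *	} while (0)
 *
 * so every static occurrence of the init macro gets its own lock class,
 * keyed by the address of __key.
 */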
3178
3179struct lock_class_key __lockdep_no_validate__;
3180EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3181
3182static int
3183print_lock_nested_lock_not_held(struct task_struct *curr,
3184 struct held_lock *hlock,
3185 unsigned long ip)
3186{
3187 if (!debug_locks_off())
3188 return 0;
3189 if (debug_locks_silent)
3190 return 0;
3191
3192 pr_warn("\n");
3193 pr_warn("==================================\n");
3194 pr_warn("WARNING: Nested lock was not taken\n");
3195 print_kernel_ident();
3196 pr_warn("----------------------------------\n");
3197
3198 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3199 print_lock(hlock);
3200
3201 pr_warn("\nbut this task is not holding:\n");
3202 pr_warn("%s\n", hlock->nest_lock->name);
3203
3204 pr_warn("\nstack backtrace:\n");
3205 dump_stack();
3206
3207 pr_warn("\nother info that might help us debug this:\n");
3208 lockdep_print_held_locks(curr);
3209
3210 pr_warn("\nstack backtrace:\n");
3211 dump_stack();
3212
3213 return 0;
3214}
3215
3216static int __lock_is_held(struct lockdep_map *lock, int read);
3217
3218
/*
 * This gets called for every mutex_lock*()/spin_lock*() variant; it
 * maintains the dependency maps and validates the locking attempt:
 */
3222static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3223 int trylock, int read, int check, int hardirqs_off,
3224 struct lockdep_map *nest_lock, unsigned long ip,
3225 int references, int pin_count)
3226{
3227 struct task_struct *curr = current;
3228 struct lock_class *class = NULL;
3229 struct held_lock *hlock;
3230 unsigned int depth;
3231 int chain_head = 0;
3232 int class_idx;
3233 u64 chain_key;
3234
3235 if (unlikely(!debug_locks))
3236 return 0;
3237
	/*
	 * Lockdep should run with IRQs disabled, otherwise we could
	 * get an interrupt which would want to take locks, which would
	 * end up in lockdep recursively.
	 */
3243 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3244 return 0;
3245
3246 if (!prove_locking || lock->key == &__lockdep_no_validate__)
3247 check = 0;
3248
3249 if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3250 class = lock->class_cache[subclass];
3251
	/*
	 * Not cached? Look the class up (or register it) the slow way:
	 */
3254 if (unlikely(!class)) {
3255 class = register_lock_class(lock, subclass, 0);
3256 if (!class)
3257 return 0;
3258 }
3259 atomic_inc((atomic_t *)&class->ops);
3260 if (very_verbose(class)) {
3261 printk("\nacquire class [%p] %s", class->key, class->name);
3262 if (class->name_version > 1)
3263 printk(KERN_CONT "#%d", class->name_version);
3264 printk(KERN_CONT "\n");
3265 dump_stack();
3266 }
3267
	/*
	 * Add the lock to the list of currently held locks.
	 * (we don't increase the depth just yet, up until the
	 * dependency checks are done)
	 */
3273 depth = curr->lockdep_depth;
3274
	/*
	 * Ran out of static storage for our per-task lock stack:
	 */
3277 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3278 return 0;
3279
3280 class_idx = class - lock_classes + 1;
3281
3282 if (depth) {
3283 hlock = curr->held_locks + depth - 1;
3284 if (hlock->class_idx == class_idx && nest_lock) {
3285 if (hlock->references) {
				/*
				 * Check: unsigned int references:12, overflow.
				 */
3289 if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
3290 return 0;
3291
3292 hlock->references++;
3293 } else {
3294 hlock->references = 2;
3295 }
3296
3297 return 1;
3298 }
3299 }
3300
3301 hlock = curr->held_locks + depth;
3302
	/*
	 * We just registered the class above, so it cannot be NULL here:
	 */
3306 if (DEBUG_LOCKS_WARN_ON(!class))
3307 return 0;
3308 hlock->class_idx = class_idx;
3309 hlock->acquire_ip = ip;
3310 hlock->instance = lock;
3311 hlock->nest_lock = nest_lock;
3312 hlock->irq_context = task_irq_context(curr);
3313 hlock->trylock = trylock;
3314 hlock->read = read;
3315 hlock->check = check;
3316 hlock->hardirqs_off = !!hardirqs_off;
3317 hlock->references = references;
3318#ifdef CONFIG_LOCK_STAT
3319 hlock->waittime_stamp = 0;
3320 hlock->holdtime_stamp = lockstat_clock();
3321#endif
3322 hlock->pin_count = pin_count;
3323
3324 if (check && !mark_irqflags(curr, hlock))
3325 return 0;
3326
	/* mark it as used: */
3328 if (!mark_lock(curr, hlock, LOCK_USED))
3329 return 0;
3330
	/*
	 * The 'chain key' is the combined hash of all the lock keys along the
	 * dependency chain: we save the running hash at every step so that we
	 * can easily get back the previous value on unlock, and use it to
	 * cache validation results for whole lock chains.
	 */
	/*
	 * class_idx must point into the statically allocated lock_classes[]
	 * array, or something got corrupted:
	 */
3344 if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
3345 return 0;
3346
3347 chain_key = curr->curr_chain_key;
3348 if (!depth) {
		/*
		 * With no locks held, the chain key must still have its
		 * initial value (0):
		 */
3352 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3353 return 0;
3354 chain_head = 1;
3355 }
3356
3357 hlock->prev_chain_key = chain_key;
3358 if (separate_irq_context(curr, hlock)) {
3359 chain_key = 0;
3360 chain_head = 1;
3361 }
3362 chain_key = iterate_chain_key(chain_key, class_idx);
3363
3364 if (nest_lock && !__lock_is_held(nest_lock, -1))
3365 return print_lock_nested_lock_not_held(curr, hlock, ip);
3366
3367 if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3368 return 0;
3369
3370 curr->curr_chain_key = chain_key;
3371 curr->lockdep_depth++;
3372 check_chain_key(curr);
3373#ifdef CONFIG_DEBUG_LOCKDEP
3374 if (unlikely(!debug_locks))
3375 return 0;
3376#endif
3377 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3378 debug_locks_off();
3379 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3380 printk(KERN_DEBUG "depth: %i max: %lu!\n",
3381 curr->lockdep_depth, MAX_LOCK_DEPTH);
3382
3383 lockdep_print_held_locks(current);
3384 debug_show_all_locks();
3385 dump_stack();
3386
3387 return 0;
3388 }
3389
3390 if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3391 max_lockdep_depth = curr->lockdep_depth;
3392
3393 return 1;
3394}
3395
3396static int
3397print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3398 unsigned long ip)
3399{
3400 if (!debug_locks_off())
3401 return 0;
3402 if (debug_locks_silent)
3403 return 0;
3404
3405 pr_warn("\n");
3406 pr_warn("=====================================\n");
3407 pr_warn("WARNING: bad unlock balance detected!\n");
3408 print_kernel_ident();
3409 pr_warn("-------------------------------------\n");
3410 pr_warn("%s/%d is trying to release lock (",
3411 curr->comm, task_pid_nr(curr));
3412 print_lockdep_cache(lock);
3413 pr_cont(") at:\n");
3414 print_ip_sym(ip);
3415 pr_warn("but there are no more locks to release!\n");
3416 pr_warn("\nother info that might help us debug this:\n");
3417 lockdep_print_held_locks(curr);
3418
3419 pr_warn("\nstack backtrace:\n");
3420 dump_stack();
3421
3422 return 0;
3423}
3424
3425static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
3426{
3427 if (hlock->instance == lock)
3428 return 1;
3429
3430 if (hlock->references) {
3431 struct lock_class *class = lock->class_cache[0];
3432
3433 if (!class)
3434 class = look_up_lock_class(lock, 0);
3435
		/*
		 * If look_up_lock_class() failed to find a class, we're trying
		 * to test if we hold a lock that has never yet been acquired.
		 * Clearly if the lock hasn't been acquired _ever_, we're not
		 * holding it either, so report failure.
		 */
3442 if (IS_ERR_OR_NULL(class))
3443 return 0;
3444
		/*
		 * References, but not a lock we're actually ref-counting?
		 * State got messed up, follow the sites that change
		 * ->references and try to make sense of it.
		 */
3450 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3451 return 0;
3452
3453 if (hlock->class_idx == class - lock_classes + 1)
3454 return 1;
3455 }
3456
3457 return 0;
3458}
3459
3460
3461static struct held_lock *find_held_lock(struct task_struct *curr,
3462 struct lockdep_map *lock,
3463 unsigned int depth, int *idx)
3464{
3465 struct held_lock *ret, *hlock, *prev_hlock;
3466 int i;
3467
3468 i = depth - 1;
3469 hlock = curr->held_locks + i;
3470 ret = hlock;
3471 if (match_held_lock(hlock, lock))
3472 goto out;
3473
3474 ret = NULL;
3475 for (i--, prev_hlock = hlock--;
3476 i >= 0;
3477 i--, prev_hlock = hlock--) {
		/*
		 * We must not cross into another irq context:
		 */
3481 if (prev_hlock->irq_context != hlock->irq_context) {
3482 ret = NULL;
3483 break;
3484 }
3485 if (match_held_lock(hlock, lock)) {
3486 ret = hlock;
3487 break;
3488 }
3489 }
3490
3491out:
3492 *idx = i;
3493 return ret;
3494}
3495
3496static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
3497 int idx)
3498{
3499 struct held_lock *hlock;
3500
3501 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
3502 if (!__lock_acquire(hlock->instance,
3503 hlock_class(hlock)->subclass,
3504 hlock->trylock,
3505 hlock->read, hlock->check,
3506 hlock->hardirqs_off,
3507 hlock->nest_lock, hlock->acquire_ip,
3508 hlock->references, hlock->pin_count))
3509 return 1;
3510 }
3511 return 0;
3512}
3513
3514static int
3515__lock_set_class(struct lockdep_map *lock, const char *name,
3516 struct lock_class_key *key, unsigned int subclass,
3517 unsigned long ip)
3518{
3519 struct task_struct *curr = current;
3520 struct held_lock *hlock;
3521 struct lock_class *class;
3522 unsigned int depth;
3523 int i;
3524
3525 depth = curr->lockdep_depth;
3526
	/*
	 * This function is about (re)setting the class of a held lock,
	 * yet we're not actually holding any locks:
	 */
3530 if (DEBUG_LOCKS_WARN_ON(!depth))
3531 return 0;
3532
3533 hlock = find_held_lock(curr, lock, depth, &i);
3534 if (!hlock)
3535 return print_unlock_imbalance_bug(curr, lock, ip);
3536
3537 lockdep_init_map(lock, name, key, 0);
3538 class = register_lock_class(lock, subclass, 0);
3539 hlock->class_idx = class - lock_classes + 1;
3540
3541 curr->lockdep_depth = i;
3542 curr->curr_chain_key = hlock->prev_chain_key;
3543
3544 if (reacquire_held_locks(curr, depth, i))
3545 return 0;
3546
	/*
	 * We popped the locks off and re-acquired them; the depth must
	 * be unchanged:
	 */
3551 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3552 return 0;
3553 return 1;
3554}
3555
3556static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3557{
3558 struct task_struct *curr = current;
3559 struct held_lock *hlock;
3560 unsigned int depth;
3561 int i;
3562
3563 depth = curr->lockdep_depth;
3564
	/*
	 * This function is about downgrading a held (write) lock,
	 * yet we're not actually holding any locks:
	 */
3568 if (DEBUG_LOCKS_WARN_ON(!depth))
3569 return 0;
3570
3571 hlock = find_held_lock(curr, lock, depth, &i);
3572 if (!hlock)
3573 return print_unlock_imbalance_bug(curr, lock, ip);
3574
3575 curr->lockdep_depth = i;
3576 curr->curr_chain_key = hlock->prev_chain_key;
3577
3578 WARN(hlock->read, "downgrading a read lock");
3579 hlock->read = 1;
3580 hlock->acquire_ip = ip;
3581
3582 if (reacquire_held_locks(curr, depth, i))
3583 return 0;
3584
3585
3586
3587
3588
3589 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3590 return 0;
3591 return 1;
3592}
3593
3594
/*
 * Remove the lock from the list of currently held locks - this gets
 * called on mutex_unlock()/spin_unlock*() (or on a failed
 * mutex_lock_interruptible()).
 *
 * @nested is a historical artifact and is ignored here.
 */
3601static int
3602__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3603{
3604 struct task_struct *curr = current;
3605 struct held_lock *hlock;
3606 unsigned int depth;
3607 int i;
3608
3609 if (unlikely(!debug_locks))
3610 return 0;
3611
3612 depth = curr->lockdep_depth;
3613
	/*
	 * Trying to release a lock while not holding any is an unlock
	 * imbalance:
	 */
3617 if (DEBUG_LOCKS_WARN_ON(depth <= 0))
3618 return print_unlock_imbalance_bug(curr, lock, ip);
3619
	/*
	 * Check whether the lock exists in the current stack
	 * of held locks:
	 */
3624 hlock = find_held_lock(curr, lock, depth, &i);
3625 if (!hlock)
3626 return print_unlock_imbalance_bug(curr, lock, ip);
3627
3628 if (hlock->instance == lock)
3629 lock_release_holdtime(hlock);
3630
3631 WARN(hlock->pin_count, "releasing a pinned lock\n");
3632
3633 if (hlock->references) {
3634 hlock->references--;
3635 if (hlock->references) {
			/*
			 * We had, and after removing one, still have
			 * references; the current lock stack is still
			 * a valid reference.. we hold multiple references.
			 */
3641 return 1;
3642 }
3643 }
3644
	/*
	 * We have the right lock to unlock, 'hlock' points to it.
	 * Now we remove it from the stack, and add back the other
	 * locks if they were held:
	 */
3651 curr->lockdep_depth = i;
3652 curr->curr_chain_key = hlock->prev_chain_key;
3653
3654 if (reacquire_held_locks(curr, depth, i + 1))
3655 return 0;
3656
3657
3658
3659
3660
3661 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3662 return 0;
3663
3664 return 1;
3665}
3666
3667static int __lock_is_held(struct lockdep_map *lock, int read)
3668{
3669 struct task_struct *curr = current;
3670 int i;
3671
3672 for (i = 0; i < curr->lockdep_depth; i++) {
3673 struct held_lock *hlock = curr->held_locks + i;
3674
3675 if (match_held_lock(hlock, lock)) {
3676 if (read == -1 || hlock->read == read)
3677 return 1;
3678
3679 return 0;
3680 }
3681 }
3682
3683 return 0;
3684}
3685
3686static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
3687{
3688 struct pin_cookie cookie = NIL_COOKIE;
3689 struct task_struct *curr = current;
3690 int i;
3691
3692 if (unlikely(!debug_locks))
3693 return cookie;
3694
3695 for (i = 0; i < curr->lockdep_depth; i++) {
3696 struct held_lock *hlock = curr->held_locks + i;
3697
3698 if (match_held_lock(hlock, lock)) {
			/*
			 * Grab 16bits of randomness; this is sufficient to not
			 * be guessable and still allows some pin nesting in
			 * our u32 pin_count.
			 */
3704 cookie.val = 1 + (prandom_u32() >> 16);
3705 hlock->pin_count += cookie.val;
3706 return cookie;
3707 }
3708 }
3709
3710 WARN(1, "pinning an unheld lock\n");
3711 return cookie;
3712}
3713
3714static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3715{
3716 struct task_struct *curr = current;
3717 int i;
3718
3719 if (unlikely(!debug_locks))
3720 return;
3721
3722 for (i = 0; i < curr->lockdep_depth; i++) {
3723 struct held_lock *hlock = curr->held_locks + i;
3724
3725 if (match_held_lock(hlock, lock)) {
3726 hlock->pin_count += cookie.val;
3727 return;
3728 }
3729 }
3730
3731 WARN(1, "pinning an unheld lock\n");
3732}
3733
3734static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3735{
3736 struct task_struct *curr = current;
3737 int i;
3738
3739 if (unlikely(!debug_locks))
3740 return;
3741
3742 for (i = 0; i < curr->lockdep_depth; i++) {
3743 struct held_lock *hlock = curr->held_locks + i;
3744
3745 if (match_held_lock(hlock, lock)) {
3746 if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
3747 return;
3748
3749 hlock->pin_count -= cookie.val;
3750
3751 if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
3752 hlock->pin_count = 0;
3753
3754 return;
3755 }
3756 }
3757
3758 WARN(1, "unpinning an unheld lock\n");
3759}
3760
/*
 * Check whether we follow the irq-flags state precisely:
 */
3764static void check_flags(unsigned long flags)
3765{
3766#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3767 defined(CONFIG_TRACE_IRQFLAGS)
3768 if (!debug_locks)
3769 return;
3770
3771 if (irqs_disabled_flags(flags)) {
3772 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3773 printk("possible reason: unannotated irqs-off.\n");
3774 }
3775 } else {
3776 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3777 printk("possible reason: unannotated irqs-on.\n");
3778 }
3779 }
3780
	/*
	 * We don't accurately track softirq state in e.g.
	 * hardirq contexts, so only check when we are not
	 * in a hardirq context:
	 */
3786 if (!hardirq_count()) {
3787 if (softirq_count()) {
3788
3789 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3790 } else {
3791
3792 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3793 }
3794 }
3795
3796 if (!debug_locks)
3797 print_irqtrace_events(current);
3798#endif
3799}
3800
3801void lock_set_class(struct lockdep_map *lock, const char *name,
3802 struct lock_class_key *key, unsigned int subclass,
3803 unsigned long ip)
3804{
3805 unsigned long flags;
3806
3807 if (unlikely(current->lockdep_recursion))
3808 return;
3809
3810 raw_local_irq_save(flags);
3811 current->lockdep_recursion = 1;
3812 check_flags(flags);
3813 if (__lock_set_class(lock, name, key, subclass, ip))
3814 check_chain_key(current);
3815 current->lockdep_recursion = 0;
3816 raw_local_irq_restore(flags);
3817}
3818EXPORT_SYMBOL_GPL(lock_set_class);
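
/*
 * Usage sketch (an assumption about the <linux/lockdep.h> wrappers, not
 * defined here): lock_set_subclass() re-keys a held lock into another
 * subclass of its own class, roughly:
 *
 *	static inline void lock_set_subclass(struct lockdep_map *lock,
 *			unsigned int subclass, unsigned long ip)
 *	{
 *		lock_set_class(lock, lock->name, lock->key, subclass, ip);
 *	}
 *
 * which e.g. the scheduler uses to restore the runqueue lock's subclass
 * after a double-lock dance.
 */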
3819
3820void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3821{
3822 unsigned long flags;
3823
3824 if (unlikely(current->lockdep_recursion))
3825 return;
3826
3827 raw_local_irq_save(flags);
3828 current->lockdep_recursion = 1;
3829 check_flags(flags);
3830 if (__lock_downgrade(lock, ip))
3831 check_chain_key(current);
3832 current->lockdep_recursion = 0;
3833 raw_local_irq_restore(flags);
3834}
3835EXPORT_SYMBOL_GPL(lock_downgrade);
3836
3837
/*
 * We are not always called with irqs disabled - do that here,
 * and also avoid lockdep recursion:
 */
3841void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3842 int trylock, int read, int check,
3843 struct lockdep_map *nest_lock, unsigned long ip)
3844{
3845 unsigned long flags;
3846
3847 if (unlikely(current->lockdep_recursion))
3848 return;
3849
3850 raw_local_irq_save(flags);
3851 check_flags(flags);
3852
3853 current->lockdep_recursion = 1;
3854 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3855 __lock_acquire(lock, subclass, trylock, read, check,
3856 irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
3857 current->lockdep_recursion = 0;
3858 raw_local_irq_restore(flags);
3859}
3860EXPORT_SYMBOL_GPL(lock_acquire);
3861
3862void lock_release(struct lockdep_map *lock, int nested,
3863 unsigned long ip)
3864{
3865 unsigned long flags;
3866
3867 if (unlikely(current->lockdep_recursion))
3868 return;
3869
3870 raw_local_irq_save(flags);
3871 check_flags(flags);
3872 current->lockdep_recursion = 1;
3873 trace_lock_release(lock, ip);
3874 if (__lock_release(lock, nested, ip))
3875 check_chain_key(current);
3876 current->lockdep_recursion = 0;
3877 raw_local_irq_restore(flags);
3878}
3879EXPORT_SYMBOL_GPL(lock_release);
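
/*
 * Usage sketch (an assumption about the <linux/lockdep.h> wrappers, not
 * defined here): the per-lock-type annotations funnel into the two entry
 * points above, roughly:
 *
 *	#define lock_acquire_exclusive(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
 *	#define lock_acquire_shared(l, s, t, n, i)	lock_acquire(l, s, t, 1, 1, n, i)
 *
 *	#define spin_acquire(l, s, t, i)	lock_acquire_exclusive(l, s, t, NULL, i)
 *	#define spin_release(l, n, i)		lock_release(l, n, i)
 *
 * so a plain spin_lock() shows up here as a non-trylock, non-read,
 * checked acquisition.
 */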
3880
3881int lock_is_held_type(struct lockdep_map *lock, int read)
3882{
3883 unsigned long flags;
3884 int ret = 0;
3885
3886 if (unlikely(current->lockdep_recursion))
		return 1; /* avoid false negative lockdep_assert_held() */
3888
3889 raw_local_irq_save(flags);
3890 check_flags(flags);
3891
3892 current->lockdep_recursion = 1;
3893 ret = __lock_is_held(lock, read);
3894 current->lockdep_recursion = 0;
3895 raw_local_irq_restore(flags);
3896
3897 return ret;
3898}
3899EXPORT_SYMBOL_GPL(lock_is_held_type);
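
/*
 * Usage sketch (an assumption about the <linux/lockdep.h> wrappers, not
 * defined here): lockdep_assert_held() and friends are built on top of
 * this, roughly:
 *
 *	#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)
 *	#define lockdep_assert_held(l)	\
 *		WARN_ON(debug_locks && !lockdep_is_held(l))
 *
 * with lock_is_held() passing read == -1 ("any mode"), while the _read /
 * _exclusive assertion variants pass 1 / 0 respectively.
 */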
3900
3901struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
3902{
3903 struct pin_cookie cookie = NIL_COOKIE;
3904 unsigned long flags;
3905
3906 if (unlikely(current->lockdep_recursion))
3907 return cookie;
3908
3909 raw_local_irq_save(flags);
3910 check_flags(flags);
3911
3912 current->lockdep_recursion = 1;
3913 cookie = __lock_pin_lock(lock);
3914 current->lockdep_recursion = 0;
3915 raw_local_irq_restore(flags);
3916
3917 return cookie;
3918}
3919EXPORT_SYMBOL_GPL(lock_pin_lock);
3920
3921void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3922{
3923 unsigned long flags;
3924
3925 if (unlikely(current->lockdep_recursion))
3926 return;
3927
3928 raw_local_irq_save(flags);
3929 check_flags(flags);
3930
3931 current->lockdep_recursion = 1;
3932 __lock_repin_lock(lock, cookie);
3933 current->lockdep_recursion = 0;
3934 raw_local_irq_restore(flags);
3935}
3936EXPORT_SYMBOL_GPL(lock_repin_lock);
3937
3938void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3939{
3940 unsigned long flags;
3941
3942 if (unlikely(current->lockdep_recursion))
3943 return;
3944
3945 raw_local_irq_save(flags);
3946 check_flags(flags);
3947
3948 current->lockdep_recursion = 1;
3949 __lock_unpin_lock(lock, cookie);
3950 current->lockdep_recursion = 0;
3951 raw_local_irq_restore(flags);
3952}
3953EXPORT_SYMBOL_GPL(lock_unpin_lock);
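
/*
 * Usage sketch (illustrative, not part of this file): the pin/unpin API is
 * used through the lockdep_pin_lock()/lockdep_unpin_lock() wrappers, e.g.
 * the scheduler pins the runqueue lock across regions that must not drop it:
 *
 *	rf->cookie = lockdep_pin_lock(&rq->lock);
 *	...			// rq->lock must stay held here
 *	lockdep_unpin_lock(&rq->lock, rf->cookie);
 *
 * Anything that (wrongly) releases the lock in between trips the
 * "releasing a pinned lock" warning in __lock_release().
 */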
3954
3955void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
3956{
3957 current->lockdep_reclaim_gfp = current_gfp_context(gfp_mask);
3958}
3959EXPORT_SYMBOL_GPL(lockdep_set_current_reclaim_state);
3960
3961void lockdep_clear_current_reclaim_state(void)
3962{
3963 current->lockdep_reclaim_gfp = 0;
3964}
3965EXPORT_SYMBOL_GPL(lockdep_clear_current_reclaim_state);
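
/*
 * Usage sketch (an assumption about the reclaim call site, not part of this
 * file): direct reclaim brackets itself with these two helpers, roughly:
 *
 *	lockdep_set_current_reclaim_state(gfp_mask);
 *	progress = try_to_free_pages(...);
 *	lockdep_clear_current_reclaim_state();
 *
 * so that locks taken while reclaiming get the RECLAIM_FS usage bits via
 * mark_irqflags().
 */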
3966
3967#ifdef CONFIG_LOCK_STAT
3968static int
3969print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
3970 unsigned long ip)
3971{
3972 if (!debug_locks_off())
3973 return 0;
3974 if (debug_locks_silent)
3975 return 0;
3976
3977 pr_warn("\n");
3978 pr_warn("=================================\n");
3979 pr_warn("WARNING: bad contention detected!\n");
3980 print_kernel_ident();
3981 pr_warn("---------------------------------\n");
3982 pr_warn("%s/%d is trying to contend lock (",
3983 curr->comm, task_pid_nr(curr));
3984 print_lockdep_cache(lock);
3985 pr_cont(") at:\n");
3986 print_ip_sym(ip);
3987 pr_warn("but there are no locks held!\n");
3988 pr_warn("\nother info that might help us debug this:\n");
3989 lockdep_print_held_locks(curr);
3990
3991 pr_warn("\nstack backtrace:\n");
3992 dump_stack();
3993
3994 return 0;
3995}
3996
3997static void
3998__lock_contended(struct lockdep_map *lock, unsigned long ip)
3999{
4000 struct task_struct *curr = current;
4001 struct held_lock *hlock;
4002 struct lock_class_stats *stats;
4003 unsigned int depth;
4004 int i, contention_point, contending_point;
4005
4006 depth = curr->lockdep_depth;
4007
4008
4009
4010
4011 if (DEBUG_LOCKS_WARN_ON(!depth))
4012 return;
4013
4014 hlock = find_held_lock(curr, lock, depth, &i);
4015 if (!hlock) {
4016 print_lock_contention_bug(curr, lock, ip);
4017 return;
4018 }
4019
4020 if (hlock->instance != lock)
4021 return;
4022
4023 hlock->waittime_stamp = lockstat_clock();
4024
4025 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
4026 contending_point = lock_point(hlock_class(hlock)->contending_point,
4027 lock->ip);
4028
4029 stats = get_lock_stats(hlock_class(hlock));
4030 if (contention_point < LOCKSTAT_POINTS)
4031 stats->contention_point[contention_point]++;
4032 if (contending_point < LOCKSTAT_POINTS)
4033 stats->contending_point[contending_point]++;
4034 if (lock->cpu != smp_processor_id())
4035 stats->bounces[bounce_contended + !!hlock->read]++;
4036 put_lock_stats(stats);
4037}
4038
4039static void
4040__lock_acquired(struct lockdep_map *lock, unsigned long ip)
4041{
4042 struct task_struct *curr = current;
4043 struct held_lock *hlock;
4044 struct lock_class_stats *stats;
4045 unsigned int depth;
4046 u64 now, waittime = 0;
4047 int i, cpu;
4048
4049 depth = curr->lockdep_depth;
4050
4051
4052
4053
4054 if (DEBUG_LOCKS_WARN_ON(!depth))
4055 return;
4056
4057 hlock = find_held_lock(curr, lock, depth, &i);
4058 if (!hlock) {
4059 print_lock_contention_bug(curr, lock, _RET_IP_);
4060 return;
4061 }
4062
4063 if (hlock->instance != lock)
4064 return;
4065
4066 cpu = smp_processor_id();
4067 if (hlock->waittime_stamp) {
4068 now = lockstat_clock();
4069 waittime = now - hlock->waittime_stamp;
4070 hlock->holdtime_stamp = now;
4071 }
4072
4073 trace_lock_acquired(lock, ip);
4074
4075 stats = get_lock_stats(hlock_class(hlock));
4076 if (waittime) {
4077 if (hlock->read)
4078 lock_time_inc(&stats->read_waittime, waittime);
4079 else
4080 lock_time_inc(&stats->write_waittime, waittime);
4081 }
4082 if (lock->cpu != cpu)
4083 stats->bounces[bounce_acquired + !!hlock->read]++;
4084 put_lock_stats(stats);
4085
4086 lock->cpu = cpu;
4087 lock->ip = ip;
4088}
4089
4090void lock_contended(struct lockdep_map *lock, unsigned long ip)
4091{
4092 unsigned long flags;
4093
4094 if (unlikely(!lock_stat))
4095 return;
4096
4097 if (unlikely(current->lockdep_recursion))
4098 return;
4099
4100 raw_local_irq_save(flags);
4101 check_flags(flags);
4102 current->lockdep_recursion = 1;
4103 trace_lock_contended(lock, ip);
4104 __lock_contended(lock, ip);
4105 current->lockdep_recursion = 0;
4106 raw_local_irq_restore(flags);
4107}
4108EXPORT_SYMBOL_GPL(lock_contended);
4109
4110void lock_acquired(struct lockdep_map *lock, unsigned long ip)
4111{
4112 unsigned long flags;
4113
4114 if (unlikely(!lock_stat))
4115 return;
4116
4117 if (unlikely(current->lockdep_recursion))
4118 return;
4119
4120 raw_local_irq_save(flags);
4121 check_flags(flags);
4122 current->lockdep_recursion = 1;
4123 __lock_acquired(lock, ip);
4124 current->lockdep_recursion = 0;
4125 raw_local_irq_restore(flags);
4126}
4127EXPORT_SYMBOL_GPL(lock_acquired);
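
/*
 * Usage sketch (an assumption about the <linux/lockdep.h> wrapper, not
 * defined here): contended/acquired events are generated by the
 * LOCK_CONTENDED() wrapper that the sleeping-lock slowpaths use, roughly:
 *
 *	#define LOCK_CONTENDED(_lock, try, lock)			\
 *	do {								\
 *		if (!try(_lock)) {					\
 *			lock_contended(&(_lock)->dep_map, _RET_IP_);	\
 *			lock(_lock);					\
 *		}							\
 *		lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
 *	} while (0)
 *
 * i.e. lock_contended() fires only when the fast path failed, and
 * lock_acquired() fires once the lock is finally taken.
 */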
4128#endif
4129
/*
 * Used by the testsuite, sanitize the validator state
 * after a simulated failure:
 */
4135void lockdep_reset(void)
4136{
4137 unsigned long flags;
4138 int i;
4139
4140 raw_local_irq_save(flags);
4141 current->curr_chain_key = 0;
4142 current->lockdep_depth = 0;
4143 current->lockdep_recursion = 0;
4144 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
4145 nr_hardirq_chains = 0;
4146 nr_softirq_chains = 0;
4147 nr_process_chains = 0;
4148 debug_locks = 1;
4149 for (i = 0; i < CHAINHASH_SIZE; i++)
4150 INIT_HLIST_HEAD(chainhash_table + i);
4151 raw_local_irq_restore(flags);
4152}
4153
4154static void zap_class(struct lock_class *class)
4155{
4156 int i;
4157
	/*
	 * Remove all dependencies this lock is
	 * involved in:
	 */
4162 for (i = 0; i < nr_list_entries; i++) {
4163 if (list_entries[i].class == class)
4164 list_del_rcu(&list_entries[i].entry);
4165 }
4166
	/*
	 * Unhash the class and remove it from the all_lock_classes list:
	 */
4169 hlist_del_rcu(&class->hash_entry);
4170 list_del_rcu(&class->lock_entry);
4171
4172 RCU_INIT_POINTER(class->key, NULL);
4173 RCU_INIT_POINTER(class->name, NULL);
4174}
4175
4176static inline int within(const void *addr, void *start, unsigned long size)
4177{
4178 return addr >= start && addr < start + size;
4179}
4180
/*
 * Used in module.c to remove lock classes from memory that is going to be
 * freed; and possibly re-used by other modules.
 *
 * We will have had one synchronize_sched() before getting here, so we're
 * guaranteed nobody will look up these exact classes -- they're properly
 * dead but still allocated.
 */
4189void lockdep_free_key_range(void *start, unsigned long size)
4190{
4191 struct lock_class *class;
4192 struct hlist_head *head;
4193 unsigned long flags;
4194 int i;
4195 int locked;
4196
4197 raw_local_irq_save(flags);
4198 locked = graph_lock();
4199
	/*
	 * Unhash all classes that were created by the module being unloaded:
	 */
4203 for (i = 0; i < CLASSHASH_SIZE; i++) {
4204 head = classhash_table + i;
4205 hlist_for_each_entry_rcu(class, head, hash_entry) {
4206 if (within(class->key, start, size))
4207 zap_class(class);
4208 else if (within(class->name, start, size))
4209 zap_class(class);
4210 }
4211 }
4212
4213 if (locked)
4214 graph_unlock();
4215 raw_local_irq_restore(flags);
4216
	/*
	 * Wait for any possible iterators from look_up_lock_class() to pass
	 * before continuing to free the memory they refer to.
	 *
	 * sync_sched() is sufficient because the read-side is IRQ disabled.
	 */
4223 synchronize_sched();
4224
4225
4226
4227
4228
4229
4230}
4231
4232void lockdep_reset_lock(struct lockdep_map *lock)
4233{
4234 struct lock_class *class;
4235 struct hlist_head *head;
4236 unsigned long flags;
4237 int i, j;
4238 int locked;
4239
4240 raw_local_irq_save(flags);
4241
	/*
	 * Remove all classes this lock might have:
	 */
4245 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
4246
4247
4248
4249 class = look_up_lock_class(lock, j);
4250 if (!IS_ERR_OR_NULL(class))
4251 zap_class(class);
4252 }
4253
4254
4255
4256
4257 locked = graph_lock();
4258 for (i = 0; i < CLASSHASH_SIZE; i++) {
4259 head = classhash_table + i;
4260 hlist_for_each_entry_rcu(class, head, hash_entry) {
4261 int match = 0;
4262
4263 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4264 match |= class == lock->class_cache[j];
4265
4266 if (unlikely(match)) {
4267 if (debug_locks_off_graph_unlock()) {
4268
4269
4270
4271 WARN_ON(1);
4272 }
4273 goto out_restore;
4274 }
4275 }
4276 }
4277 if (locked)
4278 graph_unlock();
4279
4280out_restore:
4281 raw_local_irq_restore(flags);
4282}
4283
4284void __init lockdep_info(void)
4285{
4286 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
4287
4288 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
4289 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
4290 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
4291 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
4292 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
4293 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
4294 printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
4295
4296 printk(" memory used by lock dependency info: %lu kB\n",
4297 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
4298 sizeof(struct list_head) * CLASSHASH_SIZE +
4299 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
4300 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
4301 sizeof(struct list_head) * CHAINHASH_SIZE
4302#ifdef CONFIG_PROVE_LOCKING
4303 + sizeof(struct circular_queue)
4304#endif
4305 ) / 1024
4306 );
4307
4308 printk(" per task-struct memory footprint: %lu bytes\n",
4309 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
4310}
4311
4312static void
4313print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
4314 const void *mem_to, struct held_lock *hlock)
4315{
4316 if (!debug_locks_off())
4317 return;
4318 if (debug_locks_silent)
4319 return;
4320
4321 pr_warn("\n");
4322 pr_warn("=========================\n");
4323 pr_warn("WARNING: held lock freed!\n");
4324 print_kernel_ident();
4325 pr_warn("-------------------------\n");
4326 pr_warn("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
4327 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
4328 print_lock(hlock);
4329 lockdep_print_held_locks(curr);
4330
4331 pr_warn("\nstack backtrace:\n");
4332 dump_stack();
4333}
4334
4335static inline int not_in_range(const void* mem_from, unsigned long mem_len,
4336 const void* lock_from, unsigned long lock_len)
4337{
4338 return lock_from + lock_len <= mem_from ||
4339 mem_from + mem_len <= lock_from;
4340}
4341
4342
/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range <mem_from>..<mem_from>+<mem_len>:
 */
4347void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4348{
4349 struct task_struct *curr = current;
4350 struct held_lock *hlock;
4351 unsigned long flags;
4352 int i;
4353
4354 if (unlikely(!debug_locks))
4355 return;
4356
4357 local_irq_save(flags);
4358 for (i = 0; i < curr->lockdep_depth; i++) {
4359 hlock = curr->held_locks + i;
4360
4361 if (not_in_range(mem_from, mem_len, hlock->instance,
4362 sizeof(*hlock->instance)))
4363 continue;
4364
4365 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
4366 break;
4367 }
4368 local_irq_restore(flags);
4369}
4370EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
4371
4372static void print_held_locks_bug(void)
4373{
4374 if (!debug_locks_off())
4375 return;
4376 if (debug_locks_silent)
4377 return;
4378
4379 pr_warn("\n");
4380 pr_warn("====================================\n");
4381 pr_warn("WARNING: %s/%d still has locks held!\n",
4382 current->comm, task_pid_nr(current));
4383 print_kernel_ident();
4384 pr_warn("------------------------------------\n");
4385 lockdep_print_held_locks(current);
4386 pr_warn("\nstack backtrace:\n");
4387 dump_stack();
4388}
4389
4390void debug_check_no_locks_held(void)
4391{
4392 if (unlikely(current->lockdep_depth > 0))
4393 print_held_locks_bug();
4394}
4395EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
4396
4397#ifdef __KERNEL__
4398void debug_show_all_locks(void)
4399{
4400 struct task_struct *g, *p;
4401 int count = 10;
4402 int unlock = 1;
4403
4404 if (unlikely(!debug_locks)) {
4405 pr_warn("INFO: lockdep is turned off.\n");
4406 return;
4407 }
4408 pr_warn("\nShowing all locks held in the system:\n");
4409
	/*
	 * Here we try to get the tasklist_lock as hard as possible,
	 * if not successful after 2 seconds we ignore it (but keep
	 * trying). This is to enable a debug printout even if a
	 * tasklist_lock-holding task deadlocks or crashes.
	 */
4416retry:
4417 if (!read_trylock(&tasklist_lock)) {
4418 if (count == 10)
4419 pr_warn("hm, tasklist_lock locked, retrying... ");
4420 if (count) {
4421 count--;
4422 pr_cont(" #%d", 10-count);
4423 mdelay(200);
4424 goto retry;
4425 }
4426 pr_cont(" ignoring it.\n");
4427 unlock = 0;
4428 } else {
4429 if (count != 10)
4430 pr_cont(" locked it.\n");
4431 }
4432
4433 do_each_thread(g, p) {
4434
		/*
		 * It's not reliable to print a task's held locks
		 * if it's not sleeping (or if it's not the current
		 * task):
		 */
4439 if (p->state == TASK_RUNNING && p != current)
4440 continue;
4441 if (p->lockdep_depth)
4442 lockdep_print_held_locks(p);
4443 if (!unlock)
4444 if (read_trylock(&tasklist_lock))
4445 unlock = 1;
4446 } while_each_thread(g, p);
4447
4448 pr_warn("\n");
4449 pr_warn("=============================================\n\n");
4450
4451 if (unlock)
4452 read_unlock(&tasklist_lock);
4453}
4454EXPORT_SYMBOL_GPL(debug_show_all_locks);
4455#endif
4456
/*
 * Careful: only use this function if you are sure that
 * the task cannot run in parallel!
 */
4461void debug_show_held_locks(struct task_struct *task)
4462{
4463 if (unlikely(!debug_locks)) {
4464 printk("INFO: lockdep is turned off.\n");
4465 return;
4466 }
4467 lockdep_print_held_locks(task);
4468}
4469EXPORT_SYMBOL_GPL(debug_show_held_locks);
4470
4471asmlinkage __visible void lockdep_sys_exit(void)
4472{
4473 struct task_struct *curr = current;
4474
4475 if (unlikely(curr->lockdep_depth)) {
4476 if (!debug_locks_off())
4477 return;
4478 pr_warn("\n");
4479 pr_warn("================================================\n");
4480 pr_warn("WARNING: lock held when returning to user space!\n");
4481 print_kernel_ident();
4482 pr_warn("------------------------------------------------\n");
4483 pr_warn("%s/%d is leaving the kernel with locks still held!\n",
4484 curr->comm, curr->pid);
4485 lockdep_print_held_locks(curr);
4486 }
4487}
4488
4489void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4490{
4491 struct task_struct *curr = current;
4492
	/* Note: the following can be executed concurrently, so be careful. */
4494 pr_warn("\n");
4495 pr_warn("=============================\n");
4496 pr_warn("WARNING: suspicious RCU usage\n");
4497 print_kernel_ident();
4498 pr_warn("-----------------------------\n");
4499 pr_warn("%s:%d %s!\n", file, line, s);
4500 pr_warn("\nother info that might help us debug this:\n\n");
4501 pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
4502 !rcu_lockdep_current_cpu_online()
4503 ? "RCU used illegally from offline CPU!\n"
4504 : !rcu_is_watching()
4505 ? "RCU used illegally from idle CPU!\n"
4506 : "",
4507 rcu_scheduler_active, debug_locks);
4508
4509
	/*
	 * If a CPU is in the RCU-free window in idle (ie: in the section
	 * between rcu_idle_enter() and rcu_idle_exit()), then RCU
	 * considers that CPU to be in an "extended quiescent state",
	 * which means that RCU will be completely ignoring that CPU.
	 * Therefore, rcu_read_lock() and friends have absolutely no
	 * effect on a CPU running in that state. In other words, even if
	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
	 * delete data structures out from under it. RCU really has no
	 * choice here: we need to keep an RCU-free window in idle where
	 * the CPU may possibly enter into low power mode, and signal the
	 * extended quiescent state to other CPUs that started a grace
	 * period; otherwise we would delay any grace period as long as
	 * we run in the idle task.
	 *
	 * So complain bitterly if someone does call rcu_read_lock(),
	 * rcu_read_lock_bh() and so on from extended quiescent states.
	 */
4527 if (!rcu_is_watching())
4528 pr_warn("RCU used illegally from extended quiescent state!\n");
4529
4530 lockdep_print_held_locks(curr);
4531 pr_warn("\nstack backtrace:\n");
4532 dump_stack();
4533}
4534EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
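
/*
 * Usage sketch (an assumption about the RCU debug wrapper, not defined
 * here): RCU_LOCKDEP_WARN() is the usual way this gets invoked, roughly:
 *
 *	#define RCU_LOCKDEP_WARN(c, s)					\
 *	do {								\
 *		static bool __warned;					\
 *		if (debug_lockdep_rcu_enabled() && !__warned && (c)) {	\
 *			__warned = true;				\
 *			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
 *		}							\
 *	} while (0)
 *
 * e.g. rcu_dereference_check() uses it to complain once per call site when
 * its protection condition does not hold.
 */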
4535