1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#define DISABLE_BRANCH_PROFILING
30#include <linux/mutex.h>
31#include <linux/sched.h>
32#include <linux/sched/clock.h>
33#include <linux/sched/task.h>
34#include <linux/sched/mm.h>
35#include <linux/delay.h>
36#include <linux/module.h>
37#include <linux/proc_fs.h>
38#include <linux/seq_file.h>
39#include <linux/spinlock.h>
40#include <linux/kallsyms.h>
41#include <linux/interrupt.h>
42#include <linux/stacktrace.h>
43#include <linux/debug_locks.h>
44#include <linux/irqflags.h>
45#include <linux/utsname.h>
46#include <linux/hash.h>
47#include <linux/ftrace.h>
48#include <linux/stringify.h>
49#include <linux/bitmap.h>
50#include <linux/bitops.h>
51#include <linux/gfp.h>
52#include <linux/random.h>
53#include <linux/jhash.h>
54#include <linux/nmi.h>
55#include <linux/rcupdate.h>
56#include <linux/kprobes.h>
57#include <linux/lockdep.h>
58
59#include <asm/sections.h>
60
61#include "lockdep_internals.h"
62
63#define CREATE_TRACE_POINTS
64#include <trace/events/lock.h>
65
66#ifdef CONFIG_PROVE_LOCKING
67int prove_locking = 1;
68module_param(prove_locking, int, 0644);
69#else
70#define prove_locking 0
71#endif
72
73#ifdef CONFIG_LOCK_STAT
74int lock_stat = 1;
75module_param(lock_stat, int, 0644);
76#else
77#define lock_stat 0
78#endif
79
80DEFINE_PER_CPU(unsigned int, lockdep_recursion);
81EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
82
83static __always_inline bool lockdep_enabled(void)
84{
85 if (!debug_locks)
86 return false;
87
88 if (this_cpu_read(lockdep_recursion))
89 return false;
90
91 if (current->lockdep_recursion)
92 return false;
93
94 return true;
95}
96
97
98
99
100
101
102
103
104
105static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
106static struct task_struct *__owner;
107
108static inline void lockdep_lock(void)
109{
110 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
111
112 __this_cpu_inc(lockdep_recursion);
113 arch_spin_lock(&__lock);
114 __owner = current;
115}
116
117static inline void lockdep_unlock(void)
118{
119 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
120
121 if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
122 return;
123
124 __owner = NULL;
125 arch_spin_unlock(&__lock);
126 __this_cpu_dec(lockdep_recursion);
127}
128
129static inline bool lockdep_assert_locked(void)
130{
131 return DEBUG_LOCKS_WARN_ON(__owner != current);
132}
133
134static struct task_struct *lockdep_selftest_task_struct;
135
136
137static int graph_lock(void)
138{
139 lockdep_lock();
140
141
142
143
144
145
146 if (!debug_locks) {
147 lockdep_unlock();
148 return 0;
149 }
150 return 1;
151}
152
153static inline void graph_unlock(void)
154{
155 lockdep_unlock();
156}
157
158
159
160
161
162static inline int debug_locks_off_graph_unlock(void)
163{
164 int ret = debug_locks_off();
165
166 lockdep_unlock();
167
168 return ret;
169}
170
171unsigned long nr_list_entries;
172static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
173static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
174
175
176
177
178
179
180
181#define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
182#define KEYHASH_SIZE (1UL << KEYHASH_BITS)
183static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
184unsigned long nr_lock_classes;
185unsigned long nr_zapped_classes;
186#ifndef CONFIG_DEBUG_LOCKDEP
187static
188#endif
189struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
190static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
191
192static inline struct lock_class *hlock_class(struct held_lock *hlock)
193{
194 unsigned int class_idx = hlock->class_idx;
195
196
197 barrier();
198
199 if (!test_bit(class_idx, lock_classes_in_use)) {
200
201
202
203 DEBUG_LOCKS_WARN_ON(1);
204 return NULL;
205 }
206
207
208
209
210
211 return lock_classes + class_idx;
212}
213
214#ifdef CONFIG_LOCK_STAT
215static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
216
217static inline u64 lockstat_clock(void)
218{
219 return local_clock();
220}
221
222static int lock_point(unsigned long points[], unsigned long ip)
223{
224 int i;
225
226 for (i = 0; i < LOCKSTAT_POINTS; i++) {
227 if (points[i] == 0) {
228 points[i] = ip;
229 break;
230 }
231 if (points[i] == ip)
232 break;
233 }
234
235 return i;
236}
237
238static void lock_time_inc(struct lock_time *lt, u64 time)
239{
240 if (time > lt->max)
241 lt->max = time;
242
243 if (time < lt->min || !lt->nr)
244 lt->min = time;
245
246 lt->total += time;
247 lt->nr++;
248}
249
250static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
251{
252 if (!src->nr)
253 return;
254
255 if (src->max > dst->max)
256 dst->max = src->max;
257
258 if (src->min < dst->min || !dst->nr)
259 dst->min = src->min;
260
261 dst->total += src->total;
262 dst->nr += src->nr;
263}
264
265struct lock_class_stats lock_stats(struct lock_class *class)
266{
267 struct lock_class_stats stats;
268 int cpu, i;
269
270 memset(&stats, 0, sizeof(struct lock_class_stats));
271 for_each_possible_cpu(cpu) {
272 struct lock_class_stats *pcs =
273 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
274
275 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
276 stats.contention_point[i] += pcs->contention_point[i];
277
278 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
279 stats.contending_point[i] += pcs->contending_point[i];
280
281 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
282 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
283
284 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
285 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
286
287 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
288 stats.bounces[i] += pcs->bounces[i];
289 }
290
291 return stats;
292}
293
294void clear_lock_stats(struct lock_class *class)
295{
296 int cpu;
297
298 for_each_possible_cpu(cpu) {
299 struct lock_class_stats *cpu_stats =
300 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
301
302 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
303 }
304 memset(class->contention_point, 0, sizeof(class->contention_point));
305 memset(class->contending_point, 0, sizeof(class->contending_point));
306}
307
308static struct lock_class_stats *get_lock_stats(struct lock_class *class)
309{
310 return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
311}
312
313static void lock_release_holdtime(struct held_lock *hlock)
314{
315 struct lock_class_stats *stats;
316 u64 holdtime;
317
318 if (!lock_stat)
319 return;
320
321 holdtime = lockstat_clock() - hlock->holdtime_stamp;
322
323 stats = get_lock_stats(hlock_class(hlock));
324 if (hlock->read)
325 lock_time_inc(&stats->read_holdtime, holdtime);
326 else
327 lock_time_inc(&stats->write_holdtime, holdtime);
328}
329#else
330static inline void lock_release_holdtime(struct held_lock *hlock)
331{
332}
333#endif
334
335
336
337
338
339
340
341LIST_HEAD(all_lock_classes);
342static LIST_HEAD(free_lock_classes);
343
344
345
346
347
348
349
350struct pending_free {
351 struct list_head zapped;
352 DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
353};
354
355
356
357
358
359
360
361
362
363
364
365
366static struct delayed_free {
367 struct rcu_head rcu_head;
368 int index;
369 int scheduled;
370 struct pending_free pf[2];
371} delayed_free;
372
373
374
375
376#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
377#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
378#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
379#define classhashentry(key) (classhash_table + __classhashfn((key)))
380
381static struct hlist_head classhash_table[CLASSHASH_SIZE];
382
383
384
385
386
387#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
388#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
389#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
390#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
391
392static struct hlist_head chainhash_table[CHAINHASH_SIZE];
393
394
395
396
397static inline u16 hlock_id(struct held_lock *hlock)
398{
399 BUILD_BUG_ON(MAX_LOCKDEP_KEYS_BITS + 2 > 16);
400
401 return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
402}
403
404static inline unsigned int chain_hlock_class_idx(u16 hlock_id)
405{
406 return hlock_id & (MAX_LOCKDEP_KEYS - 1);
407}
408
409
410
411
412
413
414
415static inline u64 iterate_chain_key(u64 key, u32 idx)
416{
417 u32 k0 = key, k1 = key >> 32;
418
419 __jhash_mix(idx, k0, k1);
420
421 return k0 | (u64)k1 << 32;
422}
423
424void lockdep_init_task(struct task_struct *task)
425{
426 task->lockdep_depth = 0;
427 task->curr_chain_key = INITIAL_CHAIN_KEY;
428 task->lockdep_recursion = 0;
429}
430
431static __always_inline void lockdep_recursion_inc(void)
432{
433 __this_cpu_inc(lockdep_recursion);
434}
435
436static __always_inline void lockdep_recursion_finish(void)
437{
438 if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
439 __this_cpu_write(lockdep_recursion, 0);
440}
441
442void lockdep_set_selftest_task(struct task_struct *task)
443{
444 lockdep_selftest_task_struct = task;
445}
446
447
448
449
450
451#define VERBOSE 0
452#define VERY_VERBOSE 0
453
454#if VERBOSE
455# define HARDIRQ_VERBOSE 1
456# define SOFTIRQ_VERBOSE 1
457#else
458# define HARDIRQ_VERBOSE 0
459# define SOFTIRQ_VERBOSE 0
460#endif
461
462#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
463
464
465
466static int class_filter(struct lock_class *class)
467{
468#if 0
469
470 if (class->name_version == 1 &&
471 !strcmp(class->name, "lockname"))
472 return 1;
473 if (class->name_version == 1 &&
474 !strcmp(class->name, "&struct->lockfield"))
475 return 1;
476#endif
477
478 return 0;
479}
480#endif
481
482static int verbose(struct lock_class *class)
483{
484#if VERBOSE
485 return class_filter(class);
486#endif
487 return 0;
488}
489
490static void print_lockdep_off(const char *bug_msg)
491{
492 printk(KERN_DEBUG "%s\n", bug_msg);
493 printk(KERN_DEBUG "turning off the locking correctness validator.\n");
494#ifdef CONFIG_LOCK_STAT
495 printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
496#endif
497}
498
499unsigned long nr_stack_trace_entries;
500
501#ifdef CONFIG_PROVE_LOCKING
502
503
504
505
506
507
508
509struct lock_trace {
510 struct hlist_node hash_entry;
511 u32 hash;
512 u32 nr_entries;
513 unsigned long entries[] __aligned(sizeof(unsigned long));
514};
515#define LOCK_TRACE_SIZE_IN_LONGS \
516 (sizeof(struct lock_trace) / sizeof(unsigned long))
517
518
519
520static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
521static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];
522
523static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
524{
525 return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
526 memcmp(t1->entries, t2->entries,
527 t1->nr_entries * sizeof(t1->entries[0])) == 0;
528}
529
530static struct lock_trace *save_trace(void)
531{
532 struct lock_trace *trace, *t2;
533 struct hlist_head *hash_head;
534 u32 hash;
535 int max_entries;
536
537 BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
538 BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
539
540 trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
541 max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
542 LOCK_TRACE_SIZE_IN_LONGS;
543
544 if (max_entries <= 0) {
545 if (!debug_locks_off_graph_unlock())
546 return NULL;
547
548 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
549 dump_stack();
550
551 return NULL;
552 }
553 trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
554
555 hash = jhash(trace->entries, trace->nr_entries *
556 sizeof(trace->entries[0]), 0);
557 trace->hash = hash;
558 hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
559 hlist_for_each_entry(t2, hash_head, hash_entry) {
560 if (traces_identical(trace, t2))
561 return t2;
562 }
563 nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
564 hlist_add_head(&trace->hash_entry, hash_head);
565
566 return trace;
567}
568
569
570u64 lockdep_stack_trace_count(void)
571{
572 struct lock_trace *trace;
573 u64 c = 0;
574 int i;
575
576 for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
577 hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
578 c++;
579 }
580 }
581
582 return c;
583}
584
585
586u64 lockdep_stack_hash_count(void)
587{
588 u64 c = 0;
589 int i;
590
591 for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
592 if (!hlist_empty(&stack_trace_hash[i]))
593 c++;
594
595 return c;
596}
597#endif
598
599unsigned int nr_hardirq_chains;
600unsigned int nr_softirq_chains;
601unsigned int nr_process_chains;
602unsigned int max_lockdep_depth;
603
604#ifdef CONFIG_DEBUG_LOCKDEP
605
606
607
608DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
609#endif
610
611#ifdef CONFIG_PROVE_LOCKING
612
613
614
615
616#define __USAGE(__STATE) \
617 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
618 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
619 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
620 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
621
622static const char *usage_str[] =
623{
624#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
625#include "lockdep_states.h"
626#undef LOCKDEP_STATE
627 [LOCK_USED] = "INITIAL USE",
628 [LOCK_USED_READ] = "INITIAL READ USE",
629
630 [LOCK_USAGE_STATES] = "IN-NMI",
631};
632#endif
633
634const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
635{
636 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
637}
638
639static inline unsigned long lock_flag(enum lock_usage_bit bit)
640{
641 return 1UL << bit;
642}
643
644static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
645{
646
647
648
649
650 char c = '.';
651
652
653
654
655
656
657
658
659
660 if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
661 c = '+';
662 if (class->usage_mask & lock_flag(bit))
663 c = '?';
664 } else if (class->usage_mask & lock_flag(bit))
665 c = '-';
666
667 return c;
668}
669
670void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
671{
672 int i = 0;
673
674#define LOCKDEP_STATE(__STATE) \
675 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
676 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
677#include "lockdep_states.h"
678#undef LOCKDEP_STATE
679
680 usage[i] = '\0';
681}
682
683static void __print_lock_name(struct lock_class *class)
684{
685 char str[KSYM_NAME_LEN];
686 const char *name;
687
688 name = class->name;
689 if (!name) {
690 name = __get_key_name(class->key, str);
691 printk(KERN_CONT "%s", name);
692 } else {
693 printk(KERN_CONT "%s", name);
694 if (class->name_version > 1)
695 printk(KERN_CONT "#%d", class->name_version);
696 if (class->subclass)
697 printk(KERN_CONT "/%d", class->subclass);
698 }
699}
700
701static void print_lock_name(struct lock_class *class)
702{
703 char usage[LOCK_USAGE_CHARS];
704
705 get_usage_chars(class, usage);
706
707 printk(KERN_CONT " (");
708 __print_lock_name(class);
709 printk(KERN_CONT "){%s}-{%d:%d}", usage,
710 class->wait_type_outer ?: class->wait_type_inner,
711 class->wait_type_inner);
712}
713
714static void print_lockdep_cache(struct lockdep_map *lock)
715{
716 const char *name;
717 char str[KSYM_NAME_LEN];
718
719 name = lock->name;
720 if (!name)
721 name = __get_key_name(lock->key->subkeys, str);
722
723 printk(KERN_CONT "%s", name);
724}
725
726static void print_lock(struct held_lock *hlock)
727{
728
729
730
731
732
733
734
735
736
737
738 struct lock_class *lock = hlock_class(hlock);
739
740 if (!lock) {
741 printk(KERN_CONT "<RELEASED>\n");
742 return;
743 }
744
745 printk(KERN_CONT "%px", hlock->instance);
746 print_lock_name(lock);
747 printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
748}
749
750static void lockdep_print_held_locks(struct task_struct *p)
751{
752 int i, depth = READ_ONCE(p->lockdep_depth);
753
754 if (!depth)
755 printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
756 else
757 printk("%d lock%s held by %s/%d:\n", depth,
758 depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
759
760
761
762
763 if (p != current && task_is_running(p))
764 return;
765 for (i = 0; i < depth; i++) {
766 printk(" #%d: ", i);
767 print_lock(p->held_locks + i);
768 }
769}
770
771static void print_kernel_ident(void)
772{
773 printk("%s %.*s %s\n", init_utsname()->release,
774 (int)strcspn(init_utsname()->version, " "),
775 init_utsname()->version,
776 print_tainted());
777}
778
779static int very_verbose(struct lock_class *class)
780{
781#if VERY_VERBOSE
782 return class_filter(class);
783#endif
784 return 0;
785}
786
787
788
789
790#ifdef __KERNEL__
791
792
793
794
795
796#ifndef arch_is_kernel_initmem_freed
797static int arch_is_kernel_initmem_freed(unsigned long addr)
798{
799 if (system_state < SYSTEM_FREEING_INITMEM)
800 return 0;
801
802 return init_section_contains((void *)addr, 1);
803}
804#endif
805
806static int static_obj(const void *obj)
807{
808 unsigned long start = (unsigned long) &_stext,
809 end = (unsigned long) &_end,
810 addr = (unsigned long) obj;
811
812 if (arch_is_kernel_initmem_freed(addr))
813 return 0;
814
815
816
817
818 if ((addr >= start) && (addr < end))
819 return 1;
820
821
822
823
824 if (is_kernel_percpu_address(addr))
825 return 1;
826
827
828
829
830 return is_module_address(addr) || is_module_percpu_address(addr);
831}
832#endif
833
834
835
836
837
838
839static int count_matching_names(struct lock_class *new_class)
840{
841 struct lock_class *class;
842 int count = 0;
843
844 if (!new_class->name)
845 return 0;
846
847 list_for_each_entry(class, &all_lock_classes, lock_entry) {
848 if (new_class->key - new_class->subclass == class->key)
849 return class->name_version;
850 if (class->name && !strcmp(class->name, new_class->name))
851 count = max(count, class->name_version);
852 }
853
854 return count + 1;
855}
856
857
858static noinstr struct lock_class *
859look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
860{
861 struct lockdep_subclass_key *key;
862 struct hlist_head *hash_head;
863 struct lock_class *class;
864
865 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
866 instrumentation_begin();
867 debug_locks_off();
868 printk(KERN_ERR
869 "BUG: looking up invalid subclass: %u\n", subclass);
870 printk(KERN_ERR
871 "turning off the locking correctness validator.\n");
872 dump_stack();
873 instrumentation_end();
874 return NULL;
875 }
876
877
878
879
880
881 if (unlikely(!lock->key))
882 return NULL;
883
884
885
886
887
888
889
890 BUILD_BUG_ON(sizeof(struct lock_class_key) >
891 sizeof(struct lockdep_map));
892
893 key = lock->key->subkeys + subclass;
894
895 hash_head = classhashentry(key);
896
897
898
899
900 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
901 return NULL;
902
903 hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) {
904 if (class->key == key) {
905
906
907
908
909 WARN_ON_ONCE(class->name != lock->name &&
910 lock->key != &__lockdep_no_validate__);
911 return class;
912 }
913 }
914
915 return NULL;
916}
917
918
919
920
921
922
923static bool assign_lock_key(struct lockdep_map *lock)
924{
925 unsigned long can_addr, addr = (unsigned long)lock;
926
927#ifdef __KERNEL__
928
929
930
931
932
933
934
935 BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
936#endif
937
938 if (__is_kernel_percpu_address(addr, &can_addr))
939 lock->key = (void *)can_addr;
940 else if (__is_module_percpu_address(addr, &can_addr))
941 lock->key = (void *)can_addr;
942 else if (static_obj(lock))
943 lock->key = (void *)lock;
944 else {
945
946 debug_locks_off();
947 pr_err("INFO: trying to register non-static key.\n");
948 pr_err("The code is fine but needs lockdep annotation, or maybe\n");
949 pr_err("you didn't initialize this object before use?\n");
950 pr_err("turning off the locking correctness validator.\n");
951 dump_stack();
952 return false;
953 }
954
955 return true;
956}
957
958#ifdef CONFIG_DEBUG_LOCKDEP
959
960
961static bool in_list(struct list_head *e, struct list_head *h)
962{
963 struct list_head *f;
964
965 list_for_each(f, h) {
966 if (e == f)
967 return true;
968 }
969
970 return false;
971}
972
973
974
975
976
977static bool in_any_class_list(struct list_head *e)
978{
979 struct lock_class *class;
980 int i;
981
982 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
983 class = &lock_classes[i];
984 if (in_list(e, &class->locks_after) ||
985 in_list(e, &class->locks_before))
986 return true;
987 }
988 return false;
989}
990
991static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
992{
993 struct lock_list *e;
994
995 list_for_each_entry(e, h, entry) {
996 if (e->links_to != c) {
997 printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
998 c->name ? : "(?)",
999 (unsigned long)(e - list_entries),
1000 e->links_to && e->links_to->name ?
1001 e->links_to->name : "(?)",
1002 e->class && e->class->name ? e->class->name :
1003 "(?)");
1004 return false;
1005 }
1006 }
1007 return true;
1008}
1009
1010#ifdef CONFIG_PROVE_LOCKING
1011static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1012#endif
1013
1014static bool check_lock_chain_key(struct lock_chain *chain)
1015{
1016#ifdef CONFIG_PROVE_LOCKING
1017 u64 chain_key = INITIAL_CHAIN_KEY;
1018 int i;
1019
1020 for (i = chain->base; i < chain->base + chain->depth; i++)
1021 chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
1022
1023
1024
1025
1026 if (chain->chain_key != chain_key) {
1027 printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
1028 (unsigned long long)(chain - lock_chains),
1029 (unsigned long long)chain->chain_key,
1030 (unsigned long long)chain_key);
1031 return false;
1032 }
1033#endif
1034 return true;
1035}
1036
1037static bool in_any_zapped_class_list(struct lock_class *class)
1038{
1039 struct pending_free *pf;
1040 int i;
1041
1042 for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
1043 if (in_list(&class->lock_entry, &pf->zapped))
1044 return true;
1045 }
1046
1047 return false;
1048}
1049
1050static bool __check_data_structures(void)
1051{
1052 struct lock_class *class;
1053 struct lock_chain *chain;
1054 struct hlist_head *head;
1055 struct lock_list *e;
1056 int i;
1057
1058
1059 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1060 class = &lock_classes[i];
1061 if (!in_list(&class->lock_entry, &all_lock_classes) &&
1062 !in_list(&class->lock_entry, &free_lock_classes) &&
1063 !in_any_zapped_class_list(class)) {
1064 printk(KERN_INFO "class %px/%s is not in any class list\n",
1065 class, class->name ? : "(?)");
1066 return false;
1067 }
1068 }
1069
1070
1071 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1072 class = &lock_classes[i];
1073 if (!class_lock_list_valid(class, &class->locks_before))
1074 return false;
1075 if (!class_lock_list_valid(class, &class->locks_after))
1076 return false;
1077 }
1078
1079
1080 for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
1081 head = chainhash_table + i;
1082 hlist_for_each_entry_rcu(chain, head, entry) {
1083 if (!check_lock_chain_key(chain))
1084 return false;
1085 }
1086 }
1087
1088
1089
1090
1091
1092 for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1093 e = list_entries + i;
1094 if (!in_any_class_list(&e->entry)) {
1095 printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
1096 (unsigned int)(e - list_entries),
1097 e->class->name ? : "(?)",
1098 e->links_to->name ? : "(?)");
1099 return false;
1100 }
1101 }
1102
1103
1104
1105
1106
1107 for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1108 e = list_entries + i;
1109 if (in_any_class_list(&e->entry)) {
1110 printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
1111 (unsigned int)(e - list_entries),
1112 e->class && e->class->name ? e->class->name :
1113 "(?)",
1114 e->links_to && e->links_to->name ?
1115 e->links_to->name : "(?)");
1116 return false;
1117 }
1118 }
1119
1120 return true;
1121}
1122
1123int check_consistency = 0;
1124module_param(check_consistency, int, 0644);
1125
1126static void check_data_structures(void)
1127{
1128 static bool once = false;
1129
1130 if (check_consistency && !once) {
1131 if (!__check_data_structures()) {
1132 once = true;
1133 WARN_ON(once);
1134 }
1135 }
1136}
1137
1138#else
1139
1140static inline void check_data_structures(void) { }
1141
1142#endif
1143
1144static void init_chain_block_buckets(void);
1145
1146
1147
1148
1149
1150static void init_data_structures_once(void)
1151{
1152 static bool __read_mostly ds_initialized, rcu_head_initialized;
1153 int i;
1154
1155 if (likely(rcu_head_initialized))
1156 return;
1157
1158 if (system_state >= SYSTEM_SCHEDULING) {
1159 init_rcu_head(&delayed_free.rcu_head);
1160 rcu_head_initialized = true;
1161 }
1162
1163 if (ds_initialized)
1164 return;
1165
1166 ds_initialized = true;
1167
1168 INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
1169 INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
1170
1171 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1172 list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
1173 INIT_LIST_HEAD(&lock_classes[i].locks_after);
1174 INIT_LIST_HEAD(&lock_classes[i].locks_before);
1175 }
1176 init_chain_block_buckets();
1177}
1178
1179static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
1180{
1181 unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
1182
1183 return lock_keys_hash + hash;
1184}
1185
1186
1187void lockdep_register_key(struct lock_class_key *key)
1188{
1189 struct hlist_head *hash_head;
1190 struct lock_class_key *k;
1191 unsigned long flags;
1192
1193 if (WARN_ON_ONCE(static_obj(key)))
1194 return;
1195 hash_head = keyhashentry(key);
1196
1197 raw_local_irq_save(flags);
1198 if (!graph_lock())
1199 goto restore_irqs;
1200 hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1201 if (WARN_ON_ONCE(k == key))
1202 goto out_unlock;
1203 }
1204 hlist_add_head_rcu(&key->hash_entry, hash_head);
1205out_unlock:
1206 graph_unlock();
1207restore_irqs:
1208 raw_local_irq_restore(flags);
1209}
1210EXPORT_SYMBOL_GPL(lockdep_register_key);
1211
1212
1213static bool is_dynamic_key(const struct lock_class_key *key)
1214{
1215 struct hlist_head *hash_head;
1216 struct lock_class_key *k;
1217 bool found = false;
1218
1219 if (WARN_ON_ONCE(static_obj(key)))
1220 return false;
1221
1222
1223
1224
1225
1226
1227 if (!debug_locks)
1228 return true;
1229
1230 hash_head = keyhashentry(key);
1231
1232 rcu_read_lock();
1233 hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1234 if (k == key) {
1235 found = true;
1236 break;
1237 }
1238 }
1239 rcu_read_unlock();
1240
1241 return found;
1242}
1243
1244
1245
1246
1247
1248
1249static struct lock_class *
1250register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1251{
1252 struct lockdep_subclass_key *key;
1253 struct hlist_head *hash_head;
1254 struct lock_class *class;
1255
1256 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1257
1258 class = look_up_lock_class(lock, subclass);
1259 if (likely(class))
1260 goto out_set_class_cache;
1261
1262 if (!lock->key) {
1263 if (!assign_lock_key(lock))
1264 return NULL;
1265 } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
1266 return NULL;
1267 }
1268
1269 key = lock->key->subkeys + subclass;
1270 hash_head = classhashentry(key);
1271
1272 if (!graph_lock()) {
1273 return NULL;
1274 }
1275
1276
1277
1278
1279 hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
1280 if (class->key == key)
1281 goto out_unlock_set;
1282 }
1283
1284 init_data_structures_once();
1285
1286
1287 class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
1288 lock_entry);
1289 if (!class) {
1290 if (!debug_locks_off_graph_unlock()) {
1291 return NULL;
1292 }
1293
1294 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
1295 dump_stack();
1296 return NULL;
1297 }
1298 nr_lock_classes++;
1299 __set_bit(class - lock_classes, lock_classes_in_use);
1300 debug_atomic_inc(nr_unused_locks);
1301 class->key = key;
1302 class->name = lock->name;
1303 class->subclass = subclass;
1304 WARN_ON_ONCE(!list_empty(&class->locks_before));
1305 WARN_ON_ONCE(!list_empty(&class->locks_after));
1306 class->name_version = count_matching_names(class);
1307 class->wait_type_inner = lock->wait_type_inner;
1308 class->wait_type_outer = lock->wait_type_outer;
1309 class->lock_type = lock->lock_type;
1310
1311
1312
1313
1314 hlist_add_head_rcu(&class->hash_entry, hash_head);
1315
1316
1317
1318
1319 list_move_tail(&class->lock_entry, &all_lock_classes);
1320
1321 if (verbose(class)) {
1322 graph_unlock();
1323
1324 printk("\nnew class %px: %s", class->key, class->name);
1325 if (class->name_version > 1)
1326 printk(KERN_CONT "#%d", class->name_version);
1327 printk(KERN_CONT "\n");
1328 dump_stack();
1329
1330 if (!graph_lock()) {
1331 return NULL;
1332 }
1333 }
1334out_unlock_set:
1335 graph_unlock();
1336
1337out_set_class_cache:
1338 if (!subclass || force)
1339 lock->class_cache[0] = class;
1340 else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
1341 lock->class_cache[subclass] = class;
1342
1343
1344
1345
1346
1347 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
1348 return NULL;
1349
1350 return class;
1351}
1352
1353#ifdef CONFIG_PROVE_LOCKING
1354
1355
1356
1357
1358static struct lock_list *alloc_list_entry(void)
1359{
1360 int idx = find_first_zero_bit(list_entries_in_use,
1361 ARRAY_SIZE(list_entries));
1362
1363 if (idx >= ARRAY_SIZE(list_entries)) {
1364 if (!debug_locks_off_graph_unlock())
1365 return NULL;
1366
1367 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
1368 dump_stack();
1369 return NULL;
1370 }
1371 nr_list_entries++;
1372 __set_bit(idx, list_entries_in_use);
1373 return list_entries + idx;
1374}
1375
1376
1377
1378
1379static int add_lock_to_list(struct lock_class *this,
1380 struct lock_class *links_to, struct list_head *head,
1381 unsigned long ip, u16 distance, u8 dep,
1382 const struct lock_trace *trace)
1383{
1384 struct lock_list *entry;
1385
1386
1387
1388
1389 entry = alloc_list_entry();
1390 if (!entry)
1391 return 0;
1392
1393 entry->class = this;
1394 entry->links_to = links_to;
1395 entry->dep = dep;
1396 entry->distance = distance;
1397 entry->trace = trace;
1398
1399
1400
1401
1402
1403 list_add_tail_rcu(&entry->entry, head);
1404
1405 return 1;
1406}
1407
1408
1409
1410
1411#define MAX_CIRCULAR_QUEUE_SIZE (1UL << CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS)
1412#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424struct circular_queue {
1425 struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
1426 unsigned int front, rear;
1427};
1428
1429static struct circular_queue lock_cq;
1430
1431unsigned int max_bfs_queue_depth;
1432
1433static unsigned int lockdep_dependency_gen_id;
1434
1435static inline void __cq_init(struct circular_queue *cq)
1436{
1437 cq->front = cq->rear = 0;
1438 lockdep_dependency_gen_id++;
1439}
1440
1441static inline int __cq_empty(struct circular_queue *cq)
1442{
1443 return (cq->front == cq->rear);
1444}
1445
1446static inline int __cq_full(struct circular_queue *cq)
1447{
1448 return ((cq->rear + 1) & CQ_MASK) == cq->front;
1449}
1450
1451static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
1452{
1453 if (__cq_full(cq))
1454 return -1;
1455
1456 cq->element[cq->rear] = elem;
1457 cq->rear = (cq->rear + 1) & CQ_MASK;
1458 return 0;
1459}
1460
1461
1462
1463
1464
1465static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
1466{
1467 struct lock_list * lock;
1468
1469 if (__cq_empty(cq))
1470 return NULL;
1471
1472 lock = cq->element[cq->front];
1473 cq->front = (cq->front + 1) & CQ_MASK;
1474
1475 return lock;
1476}
1477
1478static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
1479{
1480 return (cq->rear - cq->front) & CQ_MASK;
1481}
1482
1483static inline void mark_lock_accessed(struct lock_list *lock)
1484{
1485 lock->class->dep_gen_id = lockdep_dependency_gen_id;
1486}
1487
1488static inline void visit_lock_entry(struct lock_list *lock,
1489 struct lock_list *parent)
1490{
1491 lock->parent = parent;
1492}
1493
1494static inline unsigned long lock_accessed(struct lock_list *lock)
1495{
1496 return lock->class->dep_gen_id == lockdep_dependency_gen_id;
1497}
1498
1499static inline struct lock_list *get_lock_parent(struct lock_list *child)
1500{
1501 return child->parent;
1502}
1503
1504static inline int get_lock_depth(struct lock_list *child)
1505{
1506 int depth = 0;
1507 struct lock_list *parent;
1508
1509 while ((parent = get_lock_parent(child))) {
1510 child = parent;
1511 depth++;
1512 }
1513 return depth;
1514}
1515
1516
1517
1518
1519
1520
1521
1522
1523static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
1524{
1525 void *lock_class = lock->class;
1526
1527 return lock_class + offset;
1528}
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545enum bfs_result {
1546 BFS_EINVALIDNODE = -2,
1547 BFS_EQUEUEFULL = -1,
1548 BFS_RMATCH = 0,
1549 BFS_RNOMATCH = 1,
1550};
1551
1552
1553
1554
1555static inline bool bfs_error(enum bfs_result res)
1556{
1557 return res < 0;
1558}
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575#define DEP_SR_BIT (0 + (0 << 1))
1576#define DEP_ER_BIT (1 + (0 << 1))
1577#define DEP_SN_BIT (0 + (1 << 1))
1578#define DEP_EN_BIT (1 + (1 << 1))
1579
1580#define DEP_SR_MASK (1U << (DEP_SR_BIT))
1581#define DEP_ER_MASK (1U << (DEP_ER_BIT))
1582#define DEP_SN_MASK (1U << (DEP_SN_BIT))
1583#define DEP_EN_MASK (1U << (DEP_EN_BIT))
1584
1585static inline unsigned int
1586__calc_dep_bit(struct held_lock *prev, struct held_lock *next)
1587{
1588 return (prev->read == 0) + ((next->read != 2) << 1);
1589}
1590
1591static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next)
1592{
1593 return 1U << __calc_dep_bit(prev, next);
1594}
1595
1596
1597
1598
1599
1600static inline unsigned int
1601__calc_dep_bitb(struct held_lock *prev, struct held_lock *next)
1602{
1603 return (next->read != 2) + ((prev->read == 0) << 1);
1604}
1605
1606static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next)
1607{
1608 return 1U << __calc_dep_bitb(prev, next);
1609}
1610
1611
1612
1613
1614
1615static inline void __bfs_init_root(struct lock_list *lock,
1616 struct lock_class *class)
1617{
1618 lock->class = class;
1619 lock->parent = NULL;
1620 lock->only_xr = 0;
1621}
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631static inline void bfs_init_root(struct lock_list *lock,
1632 struct held_lock *hlock)
1633{
1634 __bfs_init_root(lock, hlock_class(hlock));
1635 lock->only_xr = (hlock->read == 2);
1636}
1637
1638
1639
1640
1641
1642
1643
1644
1645static inline void bfs_init_rootb(struct lock_list *lock,
1646 struct held_lock *hlock)
1647{
1648 __bfs_init_root(lock, hlock_class(hlock));
1649 lock->only_xr = (hlock->read != 0);
1650}
1651
1652static inline struct lock_list *__bfs_next(struct lock_list *lock, int offset)
1653{
1654 if (!lock || !lock->parent)
1655 return NULL;
1656
1657 return list_next_or_null_rcu(get_dep_list(lock->parent, offset),
1658 &lock->entry, struct lock_list, entry);
1659}
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688static enum bfs_result __bfs(struct lock_list *source_entry,
1689 void *data,
1690 bool (*match)(struct lock_list *entry, void *data),
1691 bool (*skip)(struct lock_list *entry, void *data),
1692 struct lock_list **target_entry,
1693 int offset)
1694{
1695 struct circular_queue *cq = &lock_cq;
1696 struct lock_list *lock = NULL;
1697 struct lock_list *entry;
1698 struct list_head *head;
1699 unsigned int cq_depth;
1700 bool first;
1701
1702 lockdep_assert_locked();
1703
1704 __cq_init(cq);
1705 __cq_enqueue(cq, source_entry);
1706
1707 while ((lock = __bfs_next(lock, offset)) || (lock = __cq_dequeue(cq))) {
1708 if (!lock->class)
1709 return BFS_EINVALIDNODE;
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720 if (lock_accessed(lock))
1721 continue;
1722 else
1723 mark_lock_accessed(lock);
1724
1725
1726
1727
1728
1729 if (lock->parent) {
1730 u8 dep = lock->dep;
1731 bool prev_only_xr = lock->parent->only_xr;
1732
1733
1734
1735
1736
1737
1738 if (prev_only_xr)
1739 dep &= ~(DEP_SR_MASK | DEP_SN_MASK);
1740
1741
1742 if (!dep)
1743 continue;
1744
1745
1746 lock->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
1747 }
1748
1749
1750
1751
1752
1753
1754
1755 if (skip && skip(lock, data))
1756 continue;
1757
1758 if (match(lock, data)) {
1759 *target_entry = lock;
1760 return BFS_RMATCH;
1761 }
1762
1763
1764
1765
1766
1767
1768 first = true;
1769 head = get_dep_list(lock, offset);
1770 list_for_each_entry_rcu(entry, head, entry) {
1771 visit_lock_entry(entry, lock);
1772
1773
1774
1775
1776
1777
1778
1779 if (!first)
1780 continue;
1781
1782 first = false;
1783
1784 if (__cq_enqueue(cq, entry))
1785 return BFS_EQUEUEFULL;
1786
1787 cq_depth = __cq_get_elem_count(cq);
1788 if (max_bfs_queue_depth < cq_depth)
1789 max_bfs_queue_depth = cq_depth;
1790 }
1791 }
1792
1793 return BFS_RNOMATCH;
1794}
1795
1796static inline enum bfs_result
1797__bfs_forwards(struct lock_list *src_entry,
1798 void *data,
1799 bool (*match)(struct lock_list *entry, void *data),
1800 bool (*skip)(struct lock_list *entry, void *data),
1801 struct lock_list **target_entry)
1802{
1803 return __bfs(src_entry, data, match, skip, target_entry,
1804 offsetof(struct lock_class, locks_after));
1805
1806}
1807
1808static inline enum bfs_result
1809__bfs_backwards(struct lock_list *src_entry,
1810 void *data,
1811 bool (*match)(struct lock_list *entry, void *data),
1812 bool (*skip)(struct lock_list *entry, void *data),
1813 struct lock_list **target_entry)
1814{
1815 return __bfs(src_entry, data, match, skip, target_entry,
1816 offsetof(struct lock_class, locks_before));
1817
1818}
1819
1820static void print_lock_trace(const struct lock_trace *trace,
1821 unsigned int spaces)
1822{
1823 stack_trace_print(trace->entries, trace->nr_entries, spaces);
1824}
1825
1826
1827
1828
1829
1830static noinline void
1831print_circular_bug_entry(struct lock_list *target, int depth)
1832{
1833 if (debug_locks_silent)
1834 return;
1835 printk("\n-> #%u", depth);
1836 print_lock_name(target->class);
1837 printk(KERN_CONT ":\n");
1838 print_lock_trace(target->trace, 6);
1839}
1840
1841static void
1842print_circular_lock_scenario(struct held_lock *src,
1843 struct held_lock *tgt,
1844 struct lock_list *prt)
1845{
1846 struct lock_class *source = hlock_class(src);
1847 struct lock_class *target = hlock_class(tgt);
1848 struct lock_class *parent = prt->class;
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863 if (parent != source) {
1864 printk("Chain exists of:\n ");
1865 __print_lock_name(source);
1866 printk(KERN_CONT " --> ");
1867 __print_lock_name(parent);
1868 printk(KERN_CONT " --> ");
1869 __print_lock_name(target);
1870 printk(KERN_CONT "\n\n");
1871 }
1872
1873 printk(" Possible unsafe locking scenario:\n\n");
1874 printk(" CPU0 CPU1\n");
1875 printk(" ---- ----\n");
1876 printk(" lock(");
1877 __print_lock_name(target);
1878 printk(KERN_CONT ");\n");
1879 printk(" lock(");
1880 __print_lock_name(parent);
1881 printk(KERN_CONT ");\n");
1882 printk(" lock(");
1883 __print_lock_name(target);
1884 printk(KERN_CONT ");\n");
1885 printk(" lock(");
1886 __print_lock_name(source);
1887 printk(KERN_CONT ");\n");
1888 printk("\n *** DEADLOCK ***\n\n");
1889}
1890
1891
1892
1893
1894
1895static noinline void
1896print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1897 struct held_lock *check_src,
1898 struct held_lock *check_tgt)
1899{
1900 struct task_struct *curr = current;
1901
1902 if (debug_locks_silent)
1903 return;
1904
1905 pr_warn("\n");
1906 pr_warn("======================================================\n");
1907 pr_warn("WARNING: possible circular locking dependency detected\n");
1908 print_kernel_ident();
1909 pr_warn("------------------------------------------------------\n");
1910 pr_warn("%s/%d is trying to acquire lock:\n",
1911 curr->comm, task_pid_nr(curr));
1912 print_lock(check_src);
1913
1914 pr_warn("\nbut task is already holding lock:\n");
1915
1916 print_lock(check_tgt);
1917 pr_warn("\nwhich lock already depends on the new lock.\n\n");
1918 pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1919
1920 print_circular_bug_entry(entry, depth);
1921}
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949static inline bool hlock_equal(struct lock_list *entry, void *data)
1950{
1951 struct held_lock *hlock = (struct held_lock *)data;
1952
1953 return hlock_class(hlock) == entry->class &&
1954 (hlock->read == 2 ||
1955 !entry->only_xr);
1956}
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976static inline bool hlock_conflict(struct lock_list *entry, void *data)
1977{
1978 struct held_lock *hlock = (struct held_lock *)data;
1979
1980 return hlock_class(hlock) == entry->class &&
1981 (hlock->read == 0 ||
1982 !entry->only_xr);
1983}
1984
1985static noinline void print_circular_bug(struct lock_list *this,
1986 struct lock_list *target,
1987 struct held_lock *check_src,
1988 struct held_lock *check_tgt)
1989{
1990 struct task_struct *curr = current;
1991 struct lock_list *parent;
1992 struct lock_list *first_parent;
1993 int depth;
1994
1995 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1996 return;
1997
1998 this->trace = save_trace();
1999 if (!this->trace)
2000 return;
2001
2002 depth = get_lock_depth(target);
2003
2004 print_circular_bug_header(target, depth, check_src, check_tgt);
2005
2006 parent = get_lock_parent(target);
2007 first_parent = parent;
2008
2009 while (parent) {
2010 print_circular_bug_entry(parent, --depth);
2011 parent = get_lock_parent(parent);
2012 }
2013
2014 printk("\nother info that might help us debug this:\n\n");
2015 print_circular_lock_scenario(check_src, check_tgt,
2016 first_parent);
2017
2018 lockdep_print_held_locks(curr);
2019
2020 printk("\nstack backtrace:\n");
2021 dump_stack();
2022}
2023
2024static noinline void print_bfs_bug(int ret)
2025{
2026 if (!debug_locks_off_graph_unlock())
2027 return;
2028
2029
2030
2031
2032 WARN(1, "lockdep bfs error:%d\n", ret);
2033}
2034
2035static bool noop_count(struct lock_list *entry, void *data)
2036{
2037 (*(unsigned long *)data)++;
2038 return false;
2039}
2040
2041static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
2042{
2043 unsigned long count = 0;
2044 struct lock_list *target_entry;
2045
2046 __bfs_forwards(this, (void *)&count, noop_count, NULL, &target_entry);
2047
2048 return count;
2049}
2050unsigned long lockdep_count_forward_deps(struct lock_class *class)
2051{
2052 unsigned long ret, flags;
2053 struct lock_list this;
2054
2055 __bfs_init_root(&this, class);
2056
2057 raw_local_irq_save(flags);
2058 lockdep_lock();
2059 ret = __lockdep_count_forward_deps(&this);
2060 lockdep_unlock();
2061 raw_local_irq_restore(flags);
2062
2063 return ret;
2064}
2065
2066static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
2067{
2068 unsigned long count = 0;
2069 struct lock_list *target_entry;
2070
2071 __bfs_backwards(this, (void *)&count, noop_count, NULL, &target_entry);
2072
2073 return count;
2074}
2075
2076unsigned long lockdep_count_backward_deps(struct lock_class *class)
2077{
2078 unsigned long ret, flags;
2079 struct lock_list this;
2080
2081 __bfs_init_root(&this, class);
2082
2083 raw_local_irq_save(flags);
2084 lockdep_lock();
2085 ret = __lockdep_count_backward_deps(&this);
2086 lockdep_unlock();
2087 raw_local_irq_restore(flags);
2088
2089 return ret;
2090}
2091
2092
2093
2094
2095
2096static noinline enum bfs_result
2097check_path(struct held_lock *target, struct lock_list *src_entry,
2098 bool (*match)(struct lock_list *entry, void *data),
2099 bool (*skip)(struct lock_list *entry, void *data),
2100 struct lock_list **target_entry)
2101{
2102 enum bfs_result ret;
2103
2104 ret = __bfs_forwards(src_entry, target, match, skip, target_entry);
2105
2106 if (unlikely(bfs_error(ret)))
2107 print_bfs_bug(ret);
2108
2109 return ret;
2110}
2111
2112
2113
2114
2115
2116
2117
2118
2119static noinline enum bfs_result
2120check_noncircular(struct held_lock *src, struct held_lock *target,
2121 struct lock_trace **const trace)
2122{
2123 enum bfs_result ret;
2124 struct lock_list *target_entry;
2125 struct lock_list src_entry;
2126
2127 bfs_init_root(&src_entry, src);
2128
2129 debug_atomic_inc(nr_cyclic_checks);
2130
2131 ret = check_path(target, &src_entry, hlock_conflict, NULL, &target_entry);
2132
2133 if (unlikely(ret == BFS_RMATCH)) {
2134 if (!*trace) {
2135
2136
2137
2138
2139
2140 *trace = save_trace();
2141 }
2142
2143 print_circular_bug(&src_entry, target_entry, src, target);
2144 }
2145
2146 return ret;
2147}
2148
2149#ifdef CONFIG_TRACE_IRQFLAGS
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192static inline bool usage_accumulate(struct lock_list *entry, void *mask)
2193{
2194 if (!entry->only_xr)
2195 *(unsigned long *)mask |= entry->class->usage_mask;
2196 else
2197 *(unsigned long *)mask |= (entry->class->usage_mask & LOCKF_IRQ);
2198
2199 return false;
2200}
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211static inline bool usage_match(struct lock_list *entry, void *mask)
2212{
2213 if (!entry->only_xr)
2214 return !!(entry->class->usage_mask & *(unsigned long *)mask);
2215 else
2216 return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask);
2217}
2218
2219static inline bool usage_skip(struct lock_list *entry, void *mask)
2220{
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247 if (entry->class->lock_type == LD_LOCK_PERCPU) {
2248 if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
2249 return false;
2250
2251 return true;
2252 }
2253
2254 return false;
2255}
2256
2257
2258
2259
2260
2261
2262
2263
2264static enum bfs_result
2265find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
2266 struct lock_list **target_entry)
2267{
2268 enum bfs_result result;
2269
2270 debug_atomic_inc(nr_find_usage_forwards_checks);
2271
2272 result = __bfs_forwards(root, &usage_mask, usage_match, usage_skip, target_entry);
2273
2274 return result;
2275}
2276
2277
2278
2279
2280
2281static enum bfs_result
2282find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
2283 struct lock_list **target_entry)
2284{
2285 enum bfs_result result;
2286
2287 debug_atomic_inc(nr_find_usage_backwards_checks);
2288
2289 result = __bfs_backwards(root, &usage_mask, usage_match, usage_skip, target_entry);
2290
2291 return result;
2292}
2293
2294static void print_lock_class_header(struct lock_class *class, int depth)
2295{
2296 int bit;
2297
2298 printk("%*s->", depth, "");
2299 print_lock_name(class);
2300#ifdef CONFIG_DEBUG_LOCKDEP
2301 printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
2302#endif
2303 printk(KERN_CONT " {\n");
2304
2305 for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {
2306 if (class->usage_mask & (1 << bit)) {
2307 int len = depth;
2308
2309 len += printk("%*s %s", depth, "", usage_str[bit]);
2310 len += printk(KERN_CONT " at:\n");
2311 print_lock_trace(class->usage_traces[bit], len);
2312 }
2313 }
2314 printk("%*s }\n", depth, "");
2315
2316 printk("%*s ... key at: [<%px>] %pS\n",
2317 depth, "", class->key, class->key);
2318}
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372static void __used
2373print_shortest_lock_dependencies(struct lock_list *leaf,
2374 struct lock_list *root)
2375{
2376 struct lock_list *entry = leaf;
2377 int depth;
2378
2379
2380 depth = get_lock_depth(leaf);
2381
2382 do {
2383 print_lock_class_header(entry->class, depth);
2384 printk("%*s ... acquired at:\n", depth, "");
2385 print_lock_trace(entry->trace, 2);
2386 printk("\n");
2387
2388 if (depth == 0 && (entry != root)) {
2389 printk("lockdep:%s bad path found in chain graph\n", __func__);
2390 break;
2391 }
2392
2393 entry = get_lock_parent(entry);
2394 depth--;
2395 } while (entry && (depth >= 0));
2396}
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418static void __used
2419print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
2420 struct lock_list *root)
2421{
2422 struct lock_list *entry = leaf;
2423 const struct lock_trace *trace = NULL;
2424 int depth;
2425
2426
2427 depth = get_lock_depth(leaf);
2428
2429 do {
2430 print_lock_class_header(entry->class, depth);
2431 if (trace) {
2432 printk("%*s ... acquired at:\n", depth, "");
2433 print_lock_trace(trace, 2);
2434 printk("\n");
2435 }
2436
2437
2438
2439
2440
2441 trace = entry->trace;
2442
2443 if (depth == 0 && (entry != root)) {
2444 printk("lockdep:%s bad path found in chain graph\n", __func__);
2445 break;
2446 }
2447
2448 entry = get_lock_parent(entry);
2449 depth--;
2450 } while (entry && (depth >= 0));
2451}
2452
2453static void
2454print_irq_lock_scenario(struct lock_list *safe_entry,
2455 struct lock_list *unsafe_entry,
2456 struct lock_class *prev_class,
2457 struct lock_class *next_class)
2458{
2459 struct lock_class *safe_class = safe_entry->class;
2460 struct lock_class *unsafe_class = unsafe_entry->class;
2461 struct lock_class *middle_class = prev_class;
2462
2463 if (middle_class == safe_class)
2464 middle_class = next_class;
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479 if (middle_class != unsafe_class) {
2480 printk("Chain exists of:\n ");
2481 __print_lock_name(safe_class);
2482 printk(KERN_CONT " --> ");
2483 __print_lock_name(middle_class);
2484 printk(KERN_CONT " --> ");
2485 __print_lock_name(unsafe_class);
2486 printk(KERN_CONT "\n\n");
2487 }
2488
2489 printk(" Possible interrupt unsafe locking scenario:\n\n");
2490 printk(" CPU0 CPU1\n");
2491 printk(" ---- ----\n");
2492 printk(" lock(");
2493 __print_lock_name(unsafe_class);
2494 printk(KERN_CONT ");\n");
2495 printk(" local_irq_disable();\n");
2496 printk(" lock(");
2497 __print_lock_name(safe_class);
2498 printk(KERN_CONT ");\n");
2499 printk(" lock(");
2500 __print_lock_name(middle_class);
2501 printk(KERN_CONT ");\n");
2502 printk(" <Interrupt>\n");
2503 printk(" lock(");
2504 __print_lock_name(safe_class);
2505 printk(KERN_CONT ");\n");
2506 printk("\n *** DEADLOCK ***\n\n");
2507}
2508
2509static void
2510print_bad_irq_dependency(struct task_struct *curr,
2511 struct lock_list *prev_root,
2512 struct lock_list *next_root,
2513 struct lock_list *backwards_entry,
2514 struct lock_list *forwards_entry,
2515 struct held_lock *prev,
2516 struct held_lock *next,
2517 enum lock_usage_bit bit1,
2518 enum lock_usage_bit bit2,
2519 const char *irqclass)
2520{
2521 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2522 return;
2523
2524 pr_warn("\n");
2525 pr_warn("=====================================================\n");
2526 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
2527 irqclass, irqclass);
2528 print_kernel_ident();
2529 pr_warn("-----------------------------------------------------\n");
2530 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
2531 curr->comm, task_pid_nr(curr),
2532 lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
2533 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
2534 lockdep_hardirqs_enabled(),
2535 curr->softirqs_enabled);
2536 print_lock(next);
2537
2538 pr_warn("\nand this task is already holding:\n");
2539 print_lock(prev);
2540 pr_warn("which would create a new lock dependency:\n");
2541 print_lock_name(hlock_class(prev));
2542 pr_cont(" ->");
2543 print_lock_name(hlock_class(next));
2544 pr_cont("\n");
2545
2546 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
2547 irqclass);
2548 print_lock_name(backwards_entry->class);
2549 pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
2550
2551 print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
2552
2553 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
2554 print_lock_name(forwards_entry->class);
2555 pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
2556 pr_warn("...");
2557
2558 print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);
2559
2560 pr_warn("\nother info that might help us debug this:\n\n");
2561 print_irq_lock_scenario(backwards_entry, forwards_entry,
2562 hlock_class(prev), hlock_class(next));
2563
2564 lockdep_print_held_locks(curr);
2565
2566 pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
2567 print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
2568
2569 pr_warn("\nthe dependencies between the lock to be acquired");
2570 pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
2571 next_root->trace = save_trace();
2572 if (!next_root->trace)
2573 return;
2574 print_shortest_lock_dependencies(forwards_entry, next_root);
2575
2576 pr_warn("\nstack backtrace:\n");
2577 dump_stack();
2578}
2579
2580static const char *state_names[] = {
2581#define LOCKDEP_STATE(__STATE) \
2582 __stringify(__STATE),
2583#include "lockdep_states.h"
2584#undef LOCKDEP_STATE
2585};
2586
2587static const char *state_rnames[] = {
2588#define LOCKDEP_STATE(__STATE) \
2589 __stringify(__STATE)"-READ",
2590#include "lockdep_states.h"
2591#undef LOCKDEP_STATE
2592};
2593
2594static inline const char *state_name(enum lock_usage_bit bit)
2595{
2596 if (bit & LOCK_USAGE_READ_MASK)
2597 return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
2598 else
2599 return state_names[bit >> LOCK_USAGE_DIR_MASK];
2600}
2601
2602
2603
2604
2605
2606
2607
2608
2609static int exclusive_bit(int new_bit)
2610{
2611 int state = new_bit & LOCK_USAGE_STATE_MASK;
2612 int dir = new_bit & LOCK_USAGE_DIR_MASK;
2613
2614
2615
2616
2617 return state | (dir ^ LOCK_USAGE_DIR_MASK);
2618}
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634static unsigned long invert_dir_mask(unsigned long mask)
2635{
2636 unsigned long excl = 0;
2637
2638
2639 excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
2640 excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
2641
2642 return excl;
2643}
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673static unsigned long exclusive_mask(unsigned long mask)
2674{
2675 unsigned long excl = invert_dir_mask(mask);
2676
2677 excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2678 excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2679
2680 return excl;
2681}
2682
2683
2684
2685
2686
2687
2688
2689
2690static unsigned long original_mask(unsigned long mask)
2691{
2692 unsigned long excl = invert_dir_mask(mask);
2693
2694
2695 excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2696 excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2697
2698 return excl;
2699}
2700
2701
2702
2703
2704
2705static int find_exclusive_match(unsigned long mask,
2706 unsigned long excl_mask,
2707 enum lock_usage_bit *bitp,
2708 enum lock_usage_bit *excl_bitp)
2709{
2710 int bit, excl, excl_read;
2711
2712 for_each_set_bit(bit, &mask, LOCK_USED) {
2713
2714
2715
2716
2717
2718 excl = exclusive_bit(bit);
2719 excl_read = excl | LOCK_USAGE_READ_MASK;
2720 if (excl_mask & lock_flag(excl)) {
2721 *bitp = bit;
2722 *excl_bitp = excl;
2723 return 0;
2724 } else if (excl_mask & lock_flag(excl_read)) {
2725 *bitp = bit;
2726 *excl_bitp = excl_read;
2727 return 0;
2728 }
2729 }
2730 return -1;
2731}
2732
2733
2734
2735
2736
2737
2738
2739static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
2740 struct held_lock *next)
2741{
2742 unsigned long usage_mask = 0, forward_mask, backward_mask;
2743 enum lock_usage_bit forward_bit = 0, backward_bit = 0;
2744 struct lock_list *target_entry1;
2745 struct lock_list *target_entry;
2746 struct lock_list this, that;
2747 enum bfs_result ret;
2748
2749
2750
2751
2752
2753 bfs_init_rootb(&this, prev);
2754
2755 ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, usage_skip, NULL);
2756 if (bfs_error(ret)) {
2757 print_bfs_bug(ret);
2758 return 0;
2759 }
2760
2761 usage_mask &= LOCKF_USED_IN_IRQ_ALL;
2762 if (!usage_mask)
2763 return 1;
2764
2765
2766
2767
2768
2769 forward_mask = exclusive_mask(usage_mask);
2770
2771 bfs_init_root(&that, next);
2772
2773 ret = find_usage_forwards(&that, forward_mask, &target_entry1);
2774 if (bfs_error(ret)) {
2775 print_bfs_bug(ret);
2776 return 0;
2777 }
2778 if (ret == BFS_RNOMATCH)
2779 return 1;
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796 backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
2797
2798 ret = find_usage_backwards(&this, backward_mask, &target_entry);
2799 if (bfs_error(ret)) {
2800 print_bfs_bug(ret);
2801 return 0;
2802 }
2803 if (DEBUG_LOCKS_WARN_ON(ret == BFS_RNOMATCH))
2804 return 1;
2805
2806
2807
2808
2809
2810 ret = find_exclusive_match(target_entry->class->usage_mask,
2811 target_entry1->class->usage_mask,
2812 &backward_bit, &forward_bit);
2813 if (DEBUG_LOCKS_WARN_ON(ret == -1))
2814 return 1;
2815
2816 print_bad_irq_dependency(curr, &this, &that,
2817 target_entry, target_entry1,
2818 prev, next,
2819 backward_bit, forward_bit,
2820 state_name(backward_bit));
2821
2822 return 0;
2823}
2824
2825#else
2826
2827static inline int check_irq_usage(struct task_struct *curr,
2828 struct held_lock *prev, struct held_lock *next)
2829{
2830 return 1;
2831}
2832
2833static inline bool usage_skip(struct lock_list *entry, void *mask)
2834{
2835 return false;
2836}
2837
2838#endif
2839
2840#ifdef CONFIG_LOCKDEP_SMALL
2841
2842
2843
2844
2845
2846
2847
2848
2849static noinline enum bfs_result
2850check_redundant(struct held_lock *src, struct held_lock *target)
2851{
2852 enum bfs_result ret;
2853 struct lock_list *target_entry;
2854 struct lock_list src_entry;
2855
2856 bfs_init_root(&src_entry, src);
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867 src_entry.only_xr = src->read == 0;
2868
2869 debug_atomic_inc(nr_redundant_checks);
2870
2871
2872
2873
2874
2875
2876 ret = check_path(target, &src_entry, hlock_equal, usage_skip, &target_entry);
2877
2878 if (ret == BFS_RMATCH)
2879 debug_atomic_inc(nr_redundant);
2880
2881 return ret;
2882}
2883
2884#else
2885
2886static inline enum bfs_result
2887check_redundant(struct held_lock *src, struct held_lock *target)
2888{
2889 return BFS_RNOMATCH;
2890}
2891
2892#endif
2893
2894static void inc_chains(int irq_context)
2895{
2896 if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2897 nr_hardirq_chains++;
2898 else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2899 nr_softirq_chains++;
2900 else
2901 nr_process_chains++;
2902}
2903
2904static void dec_chains(int irq_context)
2905{
2906 if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2907 nr_hardirq_chains--;
2908 else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2909 nr_softirq_chains--;
2910 else
2911 nr_process_chains--;
2912}
2913
2914static void
2915print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
2916{
2917 struct lock_class *next = hlock_class(nxt);
2918 struct lock_class *prev = hlock_class(prv);
2919
2920 printk(" Possible unsafe locking scenario:\n\n");
2921 printk(" CPU0\n");
2922 printk(" ----\n");
2923 printk(" lock(");
2924 __print_lock_name(prev);
2925 printk(KERN_CONT ");\n");
2926 printk(" lock(");
2927 __print_lock_name(next);
2928 printk(KERN_CONT ");\n");
2929 printk("\n *** DEADLOCK ***\n\n");
2930 printk(" May be due to missing lock nesting notation\n\n");
2931}
2932
2933static void
2934print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
2935 struct held_lock *next)
2936{
2937 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2938 return;
2939
2940 pr_warn("\n");
2941 pr_warn("============================================\n");
2942 pr_warn("WARNING: possible recursive locking detected\n");
2943 print_kernel_ident();
2944 pr_warn("--------------------------------------------\n");
2945 pr_warn("%s/%d is trying to acquire lock:\n",
2946 curr->comm, task_pid_nr(curr));
2947 print_lock(next);
2948 pr_warn("\nbut task is already holding lock:\n");
2949 print_lock(prev);
2950
2951 pr_warn("\nother info that might help us debug this:\n");
2952 print_deadlock_scenario(next, prev);
2953 lockdep_print_held_locks(curr);
2954
2955 pr_warn("\nstack backtrace:\n");
2956 dump_stack();
2957}
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969static int
2970check_deadlock(struct task_struct *curr, struct held_lock *next)
2971{
2972 struct held_lock *prev;
2973 struct held_lock *nest = NULL;
2974 int i;
2975
2976 for (i = 0; i < curr->lockdep_depth; i++) {
2977 prev = curr->held_locks + i;
2978
2979 if (prev->instance == next->nest_lock)
2980 nest = prev;
2981
2982 if (hlock_class(prev) != hlock_class(next))
2983 continue;
2984
2985
2986
2987
2988
2989 if ((next->read == 2) && prev->read)
2990 continue;
2991
2992
2993
2994
2995
2996 if (nest)
2997 return 2;
2998
2999 print_deadlock_bug(curr, prev, next);
3000 return 0;
3001 }
3002 return 1;
3003}
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027static int
3028check_prev_add(struct task_struct *curr, struct held_lock *prev,
3029 struct held_lock *next, u16 distance,
3030 struct lock_trace **const trace)
3031{
3032 struct lock_list *entry;
3033 enum bfs_result ret;
3034
3035 if (!hlock_class(prev)->key || !hlock_class(next)->key) {
3036
3037
3038
3039
3040
3041
3042 WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
3043 "Detected use-after-free of lock class %px/%s\n",
3044 hlock_class(prev),
3045 hlock_class(prev)->name);
3046 WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
3047 "Detected use-after-free of lock class %px/%s\n",
3048 hlock_class(next),
3049 hlock_class(next)->name);
3050 return 2;
3051 }
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063 ret = check_noncircular(next, prev, trace);
3064 if (unlikely(bfs_error(ret) || ret == BFS_RMATCH))
3065 return 0;
3066
3067 if (!check_irq_usage(curr, prev, next))
3068 return 0;
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
3079 if (entry->class == hlock_class(next)) {
3080 if (distance == 1)
3081 entry->distance = 1;
3082 entry->dep |= calc_dep(prev, next);
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100 list_for_each_entry(entry, &hlock_class(next)->locks_before, entry) {
3101 if (entry->class == hlock_class(prev)) {
3102 if (distance == 1)
3103 entry->distance = 1;
3104 entry->dep |= calc_depb(prev, next);
3105 return 1;
3106 }
3107 }
3108
3109
3110 return 0;
3111 }
3112 }
3113
3114
3115
3116
3117 ret = check_redundant(prev, next);
3118 if (bfs_error(ret))
3119 return 0;
3120 else if (ret == BFS_RMATCH)
3121 return 2;
3122
3123 if (!*trace) {
3124 *trace = save_trace();
3125 if (!*trace)
3126 return 0;
3127 }
3128
3129
3130
3131
3132
3133 ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
3134 &hlock_class(prev)->locks_after,
3135 next->acquire_ip, distance,
3136 calc_dep(prev, next),
3137 *trace);
3138
3139 if (!ret)
3140 return 0;
3141
3142 ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
3143 &hlock_class(next)->locks_before,
3144 next->acquire_ip, distance,
3145 calc_depb(prev, next),
3146 *trace);
3147 if (!ret)
3148 return 0;
3149
3150 return 2;
3151}
3152
3153
3154
3155
3156
3157
3158
3159static int
3160check_prevs_add(struct task_struct *curr, struct held_lock *next)
3161{
3162 struct lock_trace *trace = NULL;
3163 int depth = curr->lockdep_depth;
3164 struct held_lock *hlock;
3165
3166
3167
3168
3169
3170
3171 if (!depth)
3172 goto out_bug;
3173
3174
3175
3176
3177 if (curr->held_locks[depth].irq_context !=
3178 curr->held_locks[depth-1].irq_context)
3179 goto out_bug;
3180
3181 for (;;) {
3182 u16 distance = curr->lockdep_depth - depth + 1;
3183 hlock = curr->held_locks + depth - 1;
3184
3185 if (hlock->check) {
3186 int ret = check_prev_add(curr, hlock, next, distance, &trace);
3187 if (!ret)
3188 return 0;
3189
3190
3191
3192
3193
3194
3195
3196 if (!hlock->trylock)
3197 break;
3198 }
3199
3200 depth--;
3201
3202
3203
3204 if (!depth)
3205 break;
3206
3207
3208
3209 if (curr->held_locks[depth].irq_context !=
3210 curr->held_locks[depth-1].irq_context)
3211 break;
3212 }
3213 return 1;
3214out_bug:
3215 if (!debug_locks_off_graph_unlock())
3216 return 0;
3217
3218
3219
3220
3221
3222
3223 WARN_ON(1);
3224
3225 return 0;
3226}
3227
3228struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
3229static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
3230static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
3231unsigned long nr_zapped_lock_chains;
3232unsigned int nr_free_chain_hlocks;
3233unsigned int nr_lost_chain_hlocks;
3234unsigned int nr_large_chain_blocks;
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253#define MAX_CHAIN_BUCKETS 16
3254#define CHAIN_BLK_FLAG (1U << 15)
3255#define CHAIN_BLK_LIST_END 0xFFFFU
3256
3257static int chain_block_buckets[MAX_CHAIN_BUCKETS];
3258
3259static inline int size_to_bucket(int size)
3260{
3261 if (size > MAX_CHAIN_BUCKETS)
3262 return 0;
3263
3264 return size - 1;
3265}
3266
3267
3268
3269
3270#define for_each_chain_block(bucket, prev, curr) \
3271 for ((prev) = -1, (curr) = chain_block_buckets[bucket]; \
3272 (curr) >= 0; \
3273 (prev) = (curr), (curr) = chain_block_next(curr))
3274
3275
3276
3277
3278static inline int chain_block_next(int offset)
3279{
3280 int next = chain_hlocks[offset];
3281
3282 WARN_ON_ONCE(!(next & CHAIN_BLK_FLAG));
3283
3284 if (next == CHAIN_BLK_LIST_END)
3285 return -1;
3286
3287 next &= ~CHAIN_BLK_FLAG;
3288 next <<= 16;
3289 next |= chain_hlocks[offset + 1];
3290
3291 return next;
3292}
3293
3294
3295
3296
3297static inline int chain_block_size(int offset)
3298{
3299 return (chain_hlocks[offset + 2] << 16) | chain_hlocks[offset + 3];
3300}
3301
3302static inline void init_chain_block(int offset, int next, int bucket, int size)
3303{
3304 chain_hlocks[offset] = (next >> 16) | CHAIN_BLK_FLAG;
3305 chain_hlocks[offset + 1] = (u16)next;
3306
3307 if (size && !bucket) {
3308 chain_hlocks[offset + 2] = size >> 16;
3309 chain_hlocks[offset + 3] = (u16)size;
3310 }
3311}
3312
3313static inline void add_chain_block(int offset, int size)
3314{
3315 int bucket = size_to_bucket(size);
3316 int next = chain_block_buckets[bucket];
3317 int prev, curr;
3318
3319 if (unlikely(size < 2)) {
3320
3321
3322
3323
3324
3325
3326
3327 if (size)
3328 nr_lost_chain_hlocks++;
3329 return;
3330 }
3331
3332 nr_free_chain_hlocks += size;
3333 if (!bucket) {
3334 nr_large_chain_blocks++;
3335
3336
3337
3338
3339 for_each_chain_block(0, prev, curr) {
3340 if (size >= chain_block_size(curr))
3341 break;
3342 }
3343 init_chain_block(offset, curr, 0, size);
3344 if (prev < 0)
3345 chain_block_buckets[0] = offset;
3346 else
3347 init_chain_block(prev, offset, 0, 0);
3348 return;
3349 }
3350
3351
3352
3353 init_chain_block(offset, next, bucket, size);
3354 chain_block_buckets[bucket] = offset;
3355}
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368static inline void del_chain_block(int bucket, int size, int next)
3369{
3370 nr_free_chain_hlocks -= size;
3371 chain_block_buckets[bucket] = next;
3372
3373 if (!bucket)
3374 nr_large_chain_blocks--;
3375}
3376
3377static void init_chain_block_buckets(void)
3378{
3379 int i;
3380
3381 for (i = 0; i < MAX_CHAIN_BUCKETS; i++)
3382 chain_block_buckets[i] = -1;
3383
3384 add_chain_block(0, ARRAY_SIZE(chain_hlocks));
3385}
3386
3387
3388
3389
3390
3391
3392
3393static int alloc_chain_hlocks(int req)
3394{
3395 int bucket, curr, size;
3396
3397
3398
3399
3400
3401 BUILD_BUG_ON((MAX_LOCKDEP_KEYS-1) & CHAIN_BLK_FLAG);
3402
3403 init_data_structures_once();
3404
3405 if (nr_free_chain_hlocks < req)
3406 return -1;
3407
3408
3409
3410
3411
3412 req = max(req, 2);
3413 bucket = size_to_bucket(req);
3414 curr = chain_block_buckets[bucket];
3415
3416 if (bucket) {
3417 if (curr >= 0) {
3418 del_chain_block(bucket, req, chain_block_next(curr));
3419 return curr;
3420 }
3421
3422 curr = chain_block_buckets[0];
3423 }
3424
3425
3426
3427
3428
3429 if (curr >= 0) {
3430 size = chain_block_size(curr);
3431 if (likely(size >= req)) {
3432 del_chain_block(0, size, chain_block_next(curr));
3433 add_chain_block(curr + req, size - req);
3434 return curr;
3435 }
3436 }
3437
3438
3439
3440
3441 for (size = MAX_CHAIN_BUCKETS; size > req; size--) {
3442 bucket = size_to_bucket(size);
3443 curr = chain_block_buckets[bucket];
3444 if (curr < 0)
3445 continue;
3446
3447 del_chain_block(bucket, size, chain_block_next(curr));
3448 add_chain_block(curr + req, size - req);
3449 return curr;
3450 }
3451
3452 return -1;
3453}
3454
3455static inline void free_chain_hlocks(int base, int size)
3456{
3457 add_chain_block(base, max(size, 2));
3458}
3459
3460struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
3461{
3462 u16 chain_hlock = chain_hlocks[chain->base + i];
3463 unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
3464
3465 return lock_classes + class_idx;
3466}
3467
3468
3469
3470
3471static inline int get_first_held_lock(struct task_struct *curr,
3472 struct held_lock *hlock)
3473{
3474 int i;
3475 struct held_lock *hlock_curr;
3476
3477 for (i = curr->lockdep_depth - 1; i >= 0; i--) {
3478 hlock_curr = curr->held_locks + i;
3479 if (hlock_curr->irq_context != hlock->irq_context)
3480 break;
3481
3482 }
3483
3484 return ++i;
3485}
3486
3487#ifdef CONFIG_DEBUG_LOCKDEP
3488
3489
3490
3491static u64 print_chain_key_iteration(u16 hlock_id, u64 chain_key)
3492{
3493 u64 new_chain_key = iterate_chain_key(chain_key, hlock_id);
3494
3495 printk(" hlock_id:%d -> chain_key:%016Lx",
3496 (unsigned int)hlock_id,
3497 (unsigned long long)new_chain_key);
3498 return new_chain_key;
3499}
3500
3501static void
3502print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
3503{
3504 struct held_lock *hlock;
3505 u64 chain_key = INITIAL_CHAIN_KEY;
3506 int depth = curr->lockdep_depth;
3507 int i = get_first_held_lock(curr, hlock_next);
3508
3509 printk("depth: %u (irq_context %u)\n", depth - i + 1,
3510 hlock_next->irq_context);
3511 for (; i < depth; i++) {
3512 hlock = curr->held_locks + i;
3513 chain_key = print_chain_key_iteration(hlock_id(hlock), chain_key);
3514
3515 print_lock(hlock);
3516 }
3517
3518 print_chain_key_iteration(hlock_id(hlock_next), chain_key);
3519 print_lock(hlock_next);
3520}
3521
3522static void print_chain_keys_chain(struct lock_chain *chain)
3523{
3524 int i;
3525 u64 chain_key = INITIAL_CHAIN_KEY;
3526 u16 hlock_id;
3527
3528 printk("depth: %u\n", chain->depth);
3529 for (i = 0; i < chain->depth; i++) {
3530 hlock_id = chain_hlocks[chain->base + i];
3531 chain_key = print_chain_key_iteration(hlock_id, chain_key);
3532
3533 print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
3534 printk("\n");
3535 }
3536}
3537
3538static void print_collision(struct task_struct *curr,
3539 struct held_lock *hlock_next,
3540 struct lock_chain *chain)
3541{
3542 pr_warn("\n");
3543 pr_warn("============================\n");
3544 pr_warn("WARNING: chain_key collision\n");
3545 print_kernel_ident();
3546 pr_warn("----------------------------\n");
3547 pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
3548 pr_warn("Hash chain already cached but the contents don't match!\n");
3549
3550 pr_warn("Held locks:");
3551 print_chain_keys_held_locks(curr, hlock_next);
3552
3553 pr_warn("Locks in cached chain:");
3554 print_chain_keys_chain(chain);
3555
3556 pr_warn("\nstack backtrace:\n");
3557 dump_stack();
3558}
3559#endif
3560
3561
3562
3563
3564
3565
3566
3567static int check_no_collision(struct task_struct *curr,
3568 struct held_lock *hlock,
3569 struct lock_chain *chain)
3570{
3571#ifdef CONFIG_DEBUG_LOCKDEP
3572 int i, j, id;
3573
3574 i = get_first_held_lock(curr, hlock);
3575
3576 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
3577 print_collision(curr, hlock, chain);
3578 return 0;
3579 }
3580
3581 for (j = 0; j < chain->depth - 1; j++, i++) {
3582 id = hlock_id(&curr->held_locks[i]);
3583
3584 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
3585 print_collision(curr, hlock, chain);
3586 return 0;
3587 }
3588 }
3589#endif
3590 return 1;
3591}
3592
3593
3594
3595
3596
3597long lockdep_next_lockchain(long i)
3598{
3599 i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
3600 return i < ARRAY_SIZE(lock_chains) ? i : -2;
3601}
3602
3603unsigned long lock_chain_count(void)
3604{
3605 return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
3606}
3607
3608
3609static struct lock_chain *alloc_lock_chain(void)
3610{
3611 int idx = find_first_zero_bit(lock_chains_in_use,
3612 ARRAY_SIZE(lock_chains));
3613
3614 if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
3615 return NULL;
3616 __set_bit(idx, lock_chains_in_use);
3617 return lock_chains + idx;
3618}
3619
3620
3621
3622
3623
3624
3625
3626
3627static inline int add_chain_cache(struct task_struct *curr,
3628 struct held_lock *hlock,
3629 u64 chain_key)
3630{
3631 struct hlist_head *hash_head = chainhashentry(chain_key);
3632 struct lock_chain *chain;
3633 int i, j;
3634
3635
3636
3637
3638
3639
3640 if (lockdep_assert_locked())
3641 return 0;
3642
3643 chain = alloc_lock_chain();
3644 if (!chain) {
3645 if (!debug_locks_off_graph_unlock())
3646 return 0;
3647
3648 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
3649 dump_stack();
3650 return 0;
3651 }
3652 chain->chain_key = chain_key;
3653 chain->irq_context = hlock->irq_context;
3654 i = get_first_held_lock(curr, hlock);
3655 chain->depth = curr->lockdep_depth + 1 - i;
3656
3657 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
3658 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
3659 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
3660
3661 j = alloc_chain_hlocks(chain->depth);
3662 if (j < 0) {
3663 if (!debug_locks_off_graph_unlock())
3664 return 0;
3665
3666 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
3667 dump_stack();
3668 return 0;
3669 }
3670
3671 chain->base = j;
3672 for (j = 0; j < chain->depth - 1; j++, i++) {
3673 int lock_id = hlock_id(curr->held_locks + i);
3674
3675 chain_hlocks[chain->base + j] = lock_id;
3676 }
3677 chain_hlocks[chain->base + j] = hlock_id(hlock);
3678 hlist_add_head_rcu(&chain->entry, hash_head);
3679 debug_atomic_inc(chain_lookup_misses);
3680 inc_chains(chain->irq_context);
3681
3682 return 1;
3683}
3684
3685
3686
3687
3688
3689static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
3690{
3691 struct hlist_head *hash_head = chainhashentry(chain_key);
3692 struct lock_chain *chain;
3693
3694 hlist_for_each_entry_rcu(chain, hash_head, entry) {
3695 if (READ_ONCE(chain->chain_key) == chain_key) {
3696 debug_atomic_inc(chain_lookup_hits);
3697 return chain;
3698 }
3699 }
3700 return NULL;
3701}
3702
3703
3704
3705
3706
3707
3708
3709static inline int lookup_chain_cache_add(struct task_struct *curr,
3710 struct held_lock *hlock,
3711 u64 chain_key)
3712{
3713 struct lock_class *class = hlock_class(hlock);
3714 struct lock_chain *chain = lookup_chain_cache(chain_key);
3715
3716 if (chain) {
3717cache_hit:
3718 if (!check_no_collision(curr, hlock, chain))
3719 return 0;
3720
3721 if (very_verbose(class)) {
3722 printk("\nhash chain already cached, key: "
3723 "%016Lx tail class: [%px] %s\n",
3724 (unsigned long long)chain_key,
3725 class->key, class->name);
3726 }
3727
3728 return 0;
3729 }
3730
3731 if (very_verbose(class)) {
3732 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
3733 (unsigned long long)chain_key, class->key, class->name);
3734 }
3735
3736 if (!graph_lock())
3737 return 0;
3738
3739
3740
3741
3742 chain = lookup_chain_cache(chain_key);
3743 if (chain) {
3744 graph_unlock();
3745 goto cache_hit;
3746 }
3747
3748 if (!add_chain_cache(curr, hlock, chain_key))
3749 return 0;
3750
3751 return 1;
3752}
3753
3754static int validate_chain(struct task_struct *curr,
3755 struct held_lock *hlock,
3756 int chain_head, u64 chain_key)
3757{
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768 if (!hlock->trylock && hlock->check &&
3769 lookup_chain_cache_add(curr, hlock, chain_key)) {
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788 int ret = check_deadlock(curr, hlock);
3789
3790 if (!ret)
3791 return 0;
3792
3793
3794
3795
3796
3797
3798
3799
3800 if (!chain_head && ret != 2) {
3801 if (!check_prevs_add(curr, hlock))
3802 return 0;
3803 }
3804
3805 graph_unlock();
3806 } else {
3807
3808 if (unlikely(!debug_locks))
3809 return 0;
3810 }
3811
3812 return 1;
3813}
3814#else
3815static inline int validate_chain(struct task_struct *curr,
3816 struct held_lock *hlock,
3817 int chain_head, u64 chain_key)
3818{
3819 return 1;
3820}
3821
3822static void init_chain_block_buckets(void) { }
3823#endif
3824
3825
3826
3827
3828
3829static void check_chain_key(struct task_struct *curr)
3830{
3831#ifdef CONFIG_DEBUG_LOCKDEP
3832 struct held_lock *hlock, *prev_hlock = NULL;
3833 unsigned int i;
3834 u64 chain_key = INITIAL_CHAIN_KEY;
3835
3836 for (i = 0; i < curr->lockdep_depth; i++) {
3837 hlock = curr->held_locks + i;
3838 if (chain_key != hlock->prev_chain_key) {
3839 debug_locks_off();
3840
3841
3842
3843
3844 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
3845 curr->lockdep_depth, i,
3846 (unsigned long long)chain_key,
3847 (unsigned long long)hlock->prev_chain_key);
3848 return;
3849 }
3850
3851
3852
3853
3854
3855 if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
3856 return;
3857
3858 if (prev_hlock && (prev_hlock->irq_context !=
3859 hlock->irq_context))
3860 chain_key = INITIAL_CHAIN_KEY;
3861 chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
3862 prev_hlock = hlock;
3863 }
3864 if (chain_key != curr->curr_chain_key) {
3865 debug_locks_off();
3866
3867
3868
3869
3870 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
3871 curr->lockdep_depth, i,
3872 (unsigned long long)chain_key,
3873 (unsigned long long)curr->curr_chain_key);
3874 }
3875#endif
3876}
3877
3878#ifdef CONFIG_PROVE_LOCKING
3879static int mark_lock(struct task_struct *curr, struct held_lock *this,
3880 enum lock_usage_bit new_bit);
3881
3882static void print_usage_bug_scenario(struct held_lock *lock)
3883{
3884 struct lock_class *class = hlock_class(lock);
3885
3886 printk(" Possible unsafe locking scenario:\n\n");
3887 printk(" CPU0\n");
3888 printk(" ----\n");
3889 printk(" lock(");
3890 __print_lock_name(class);
3891 printk(KERN_CONT ");\n");
3892 printk(" <Interrupt>\n");
3893 printk(" lock(");
3894 __print_lock_name(class);
3895 printk(KERN_CONT ");\n");
3896 printk("\n *** DEADLOCK ***\n\n");
3897}
3898
3899static void
3900print_usage_bug(struct task_struct *curr, struct held_lock *this,
3901 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
3902{
3903 if (!debug_locks_off() || debug_locks_silent)
3904 return;
3905
3906 pr_warn("\n");
3907 pr_warn("================================\n");
3908 pr_warn("WARNING: inconsistent lock state\n");
3909 print_kernel_ident();
3910 pr_warn("--------------------------------\n");
3911
3912 pr_warn("inconsistent {%s} -> {%s} usage.\n",
3913 usage_str[prev_bit], usage_str[new_bit]);
3914
3915 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
3916 curr->comm, task_pid_nr(curr),
3917 lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
3918 lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
3919 lockdep_hardirqs_enabled(),
3920 lockdep_softirqs_enabled(curr));
3921 print_lock(this);
3922
3923 pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
3924 print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1);
3925
3926 print_irqtrace_events(curr);
3927 pr_warn("\nother info that might help us debug this:\n");
3928 print_usage_bug_scenario(this);
3929
3930 lockdep_print_held_locks(curr);
3931
3932 pr_warn("\nstack backtrace:\n");
3933 dump_stack();
3934}
3935
3936
3937
3938
3939static inline int
3940valid_state(struct task_struct *curr, struct held_lock *this,
3941 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
3942{
3943 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
3944 graph_unlock();
3945 print_usage_bug(curr, this, bad_bit, new_bit);
3946 return 0;
3947 }
3948 return 1;
3949}
3950
3951
3952
3953
3954
3955static void
3956print_irq_inversion_bug(struct task_struct *curr,
3957 struct lock_list *root, struct lock_list *other,
3958 struct held_lock *this, int forwards,
3959 const char *irqclass)
3960{
3961 struct lock_list *entry = other;
3962 struct lock_list *middle = NULL;
3963 int depth;
3964
3965 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
3966 return;
3967
3968 pr_warn("\n");
3969 pr_warn("========================================================\n");
3970 pr_warn("WARNING: possible irq lock inversion dependency detected\n");
3971 print_kernel_ident();
3972 pr_warn("--------------------------------------------------------\n");
3973 pr_warn("%s/%d just changed the state of lock:\n",
3974 curr->comm, task_pid_nr(curr));
3975 print_lock(this);
3976 if (forwards)
3977 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
3978 else
3979 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
3980 print_lock_name(other->class);
3981 pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
3982
3983 pr_warn("\nother info that might help us debug this:\n");
3984
3985
3986 depth = get_lock_depth(other);
3987 do {
3988 if (depth == 0 && (entry != root)) {
3989 pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
3990 break;
3991 }
3992 middle = entry;
3993 entry = get_lock_parent(entry);
3994 depth--;
3995 } while (entry && entry != root && (depth >= 0));
3996 if (forwards)
3997 print_irq_lock_scenario(root, other,
3998 middle ? middle->class : root->class, other->class);
3999 else
4000 print_irq_lock_scenario(other, root,
4001 middle ? middle->class : other->class, root->class);
4002
4003 lockdep_print_held_locks(curr);
4004
4005 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
4006 root->trace = save_trace();
4007 if (!root->trace)
4008 return;
4009 print_shortest_lock_dependencies(other, root);
4010
4011 pr_warn("\nstack backtrace:\n");
4012 dump_stack();
4013}
4014
4015
4016
4017
4018
4019static int
4020check_usage_forwards(struct task_struct *curr, struct held_lock *this,
4021 enum lock_usage_bit bit)
4022{
4023 enum bfs_result ret;
4024 struct lock_list root;
4025 struct lock_list *target_entry;
4026 enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
4027 unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
4028
4029 bfs_init_root(&root, this);
4030 ret = find_usage_forwards(&root, usage_mask, &target_entry);
4031 if (bfs_error(ret)) {
4032 print_bfs_bug(ret);
4033 return 0;
4034 }
4035 if (ret == BFS_RNOMATCH)
4036 return 1;
4037
4038
4039 if (target_entry->class->usage_mask & lock_flag(bit)) {
4040 print_irq_inversion_bug(curr, &root, target_entry,
4041 this, 1, state_name(bit));
4042 } else {
4043 print_irq_inversion_bug(curr, &root, target_entry,
4044 this, 1, state_name(read_bit));
4045 }
4046
4047 return 0;
4048}
4049
4050
4051
4052
4053
4054static int
4055check_usage_backwards(struct task_struct *curr, struct held_lock *this,
4056 enum lock_usage_bit bit)
4057{
4058 enum bfs_result ret;
4059 struct lock_list root;
4060 struct lock_list *target_entry;
4061 enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
4062 unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
4063
4064 bfs_init_rootb(&root, this);
4065 ret = find_usage_backwards(&root, usage_mask, &target_entry);
4066 if (bfs_error(ret)) {
4067 print_bfs_bug(ret);
4068 return 0;
4069 }
4070 if (ret == BFS_RNOMATCH)
4071 return 1;
4072
4073
4074 if (target_entry->class->usage_mask & lock_flag(bit)) {
4075 print_irq_inversion_bug(curr, &root, target_entry,
4076 this, 0, state_name(bit));
4077 } else {
4078 print_irq_inversion_bug(curr, &root, target_entry,
4079 this, 0, state_name(read_bit));
4080 }
4081
4082 return 0;
4083}
4084
4085void print_irqtrace_events(struct task_struct *curr)
4086{
4087 const struct irqtrace_events *trace = &curr->irqtrace;
4088
4089 printk("irq event stamp: %u\n", trace->irq_events);
4090 printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
4091 trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
4092 (void *)trace->hardirq_enable_ip);
4093 printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
4094 trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip,
4095 (void *)trace->hardirq_disable_ip);
4096 printk("softirqs last enabled at (%u): [<%px>] %pS\n",
4097 trace->softirq_enable_event, (void *)trace->softirq_enable_ip,
4098 (void *)trace->softirq_enable_ip);
4099 printk("softirqs last disabled at (%u): [<%px>] %pS\n",
4100 trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
4101 (void *)trace->softirq_disable_ip);
4102}
4103
4104static int HARDIRQ_verbose(struct lock_class *class)
4105{
4106#if HARDIRQ_VERBOSE
4107 return class_filter(class);
4108#endif
4109 return 0;
4110}
4111
4112static int SOFTIRQ_verbose(struct lock_class *class)
4113{
4114#if SOFTIRQ_VERBOSE
4115 return class_filter(class);
4116#endif
4117 return 0;
4118}
4119
4120static int (*state_verbose_f[])(struct lock_class *class) = {
4121#define LOCKDEP_STATE(__STATE) \
4122 __STATE##_verbose,
4123#include "lockdep_states.h"
4124#undef LOCKDEP_STATE
4125};
4126
4127static inline int state_verbose(enum lock_usage_bit bit,
4128 struct lock_class *class)
4129{
4130 return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
4131}
4132
4133typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
4134 enum lock_usage_bit bit, const char *name);
4135
4136static int
4137mark_lock_irq(struct task_struct *curr, struct held_lock *this,
4138 enum lock_usage_bit new_bit)
4139{
4140 int excl_bit = exclusive_bit(new_bit);
4141 int read = new_bit & LOCK_USAGE_READ_MASK;
4142 int dir = new_bit & LOCK_USAGE_DIR_MASK;
4143
4144
4145
4146
4147
4148 if (!valid_state(curr, this, new_bit, excl_bit))
4149 return 0;
4150
4151
4152
4153
4154 if (!read && !valid_state(curr, this, new_bit,
4155 excl_bit + LOCK_USAGE_READ_MASK))
4156 return 0;
4157
4158
4159
4160
4161
4162
4163 if (dir) {
4164
4165
4166
4167
4168 if (!check_usage_backwards(curr, this, excl_bit))
4169 return 0;
4170 } else {
4171
4172
4173
4174
4175 if (!check_usage_forwards(curr, this, excl_bit))
4176 return 0;
4177 }
4178
4179 if (state_verbose(new_bit, hlock_class(this)))
4180 return 2;
4181
4182 return 1;
4183}
4184
4185
4186
4187
4188static int
4189mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
4190{
4191 struct held_lock *hlock;
4192 int i;
4193
4194 for (i = 0; i < curr->lockdep_depth; i++) {
4195 enum lock_usage_bit hlock_bit = base_bit;
4196 hlock = curr->held_locks + i;
4197
4198 if (hlock->read)
4199 hlock_bit += LOCK_USAGE_READ_MASK;
4200
4201 BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
4202
4203 if (!hlock->check)
4204 continue;
4205
4206 if (!mark_lock(curr, hlock, hlock_bit))
4207 return 0;
4208 }
4209
4210 return 1;
4211}
4212
4213
4214
4215
4216static void __trace_hardirqs_on_caller(void)
4217{
4218 struct task_struct *curr = current;
4219
4220
4221
4222
4223
4224 if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
4225 return;
4226
4227
4228
4229
4230
4231 if (curr->softirqs_enabled)
4232 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
4233}
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244void lockdep_hardirqs_on_prepare(unsigned long ip)
4245{
4246 if (unlikely(!debug_locks))
4247 return;
4248
4249
4250
4251
4252 if (unlikely(in_nmi()))
4253 return;
4254
4255 if (unlikely(this_cpu_read(lockdep_recursion)))
4256 return;
4257
4258 if (unlikely(lockdep_hardirqs_enabled())) {
4259
4260
4261
4262
4263
4264 __debug_atomic_inc(redundant_hardirqs_on);
4265 return;
4266 }
4267
4268
4269
4270
4271
4272
4273 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4274 return;
4275
4276
4277
4278
4279 if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
4280 return;
4281
4282
4283
4284
4285
4286 if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
4287 return;
4288
4289 current->hardirq_chain_key = current->curr_chain_key;
4290
4291 lockdep_recursion_inc();
4292 __trace_hardirqs_on_caller();
4293 lockdep_recursion_finish();
4294}
4295EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
4296
4297void noinstr lockdep_hardirqs_on(unsigned long ip)
4298{
4299 struct irqtrace_events *trace = ¤t->irqtrace;
4300
4301 if (unlikely(!debug_locks))
4302 return;
4303
4304
4305
4306
4307
4308
4309
4310
4311 if (unlikely(in_nmi())) {
4312 if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
4313 return;
4314
4315
4316
4317
4318
4319
4320
4321 goto skip_checks;
4322 }
4323
4324 if (unlikely(this_cpu_read(lockdep_recursion)))
4325 return;
4326
4327 if (lockdep_hardirqs_enabled()) {
4328
4329
4330
4331
4332
4333 __debug_atomic_inc(redundant_hardirqs_on);
4334 return;
4335 }
4336
4337
4338
4339
4340
4341
4342 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4343 return;
4344
4345
4346
4347
4348
4349 DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
4350 current->curr_chain_key);
4351
4352skip_checks:
4353
4354 __this_cpu_write(hardirqs_enabled, 1);
4355 trace->hardirq_enable_ip = ip;
4356 trace->hardirq_enable_event = ++trace->irq_events;
4357 debug_atomic_inc(hardirqs_on_events);
4358}
4359EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
4360
4361
4362
4363
4364void noinstr lockdep_hardirqs_off(unsigned long ip)
4365{
4366 if (unlikely(!debug_locks))
4367 return;
4368
4369
4370
4371
4372
4373
4374 if (in_nmi()) {
4375 if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
4376 return;
4377 } else if (__this_cpu_read(lockdep_recursion))
4378 return;
4379
4380
4381
4382
4383
4384 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4385 return;
4386
4387 if (lockdep_hardirqs_enabled()) {
4388 struct irqtrace_events *trace = ¤t->irqtrace;
4389
4390
4391
4392
4393 __this_cpu_write(hardirqs_enabled, 0);
4394 trace->hardirq_disable_ip = ip;
4395 trace->hardirq_disable_event = ++trace->irq_events;
4396 debug_atomic_inc(hardirqs_off_events);
4397 } else {
4398 debug_atomic_inc(redundant_hardirqs_off);
4399 }
4400}
4401EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
4402
4403
4404
4405
4406void lockdep_softirqs_on(unsigned long ip)
4407{
4408 struct irqtrace_events *trace = ¤t->irqtrace;
4409
4410 if (unlikely(!lockdep_enabled()))
4411 return;
4412
4413
4414
4415
4416
4417 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4418 return;
4419
4420 if (current->softirqs_enabled) {
4421 debug_atomic_inc(redundant_softirqs_on);
4422 return;
4423 }
4424
4425 lockdep_recursion_inc();
4426
4427
4428
4429 current->softirqs_enabled = 1;
4430 trace->softirq_enable_ip = ip;
4431 trace->softirq_enable_event = ++trace->irq_events;
4432 debug_atomic_inc(softirqs_on_events);
4433
4434
4435
4436
4437
4438 if (lockdep_hardirqs_enabled())
4439 mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
4440 lockdep_recursion_finish();
4441}
4442
4443
4444
4445
4446void lockdep_softirqs_off(unsigned long ip)
4447{
4448 if (unlikely(!lockdep_enabled()))
4449 return;
4450
4451
4452
4453
4454 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4455 return;
4456
4457 if (current->softirqs_enabled) {
4458 struct irqtrace_events *trace = ¤t->irqtrace;
4459
4460
4461
4462
4463 current->softirqs_enabled = 0;
4464 trace->softirq_disable_ip = ip;
4465 trace->softirq_disable_event = ++trace->irq_events;
4466 debug_atomic_inc(softirqs_off_events);
4467
4468
4469
4470 DEBUG_LOCKS_WARN_ON(!softirq_count());
4471 } else
4472 debug_atomic_inc(redundant_softirqs_off);
4473}
4474
4475static int
4476mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4477{
4478 if (!check)
4479 goto lock_used;
4480
4481
4482
4483
4484
4485 if (!hlock->trylock) {
4486 if (hlock->read) {
4487 if (lockdep_hardirq_context())
4488 if (!mark_lock(curr, hlock,
4489 LOCK_USED_IN_HARDIRQ_READ))
4490 return 0;
4491 if (curr->softirq_context)
4492 if (!mark_lock(curr, hlock,
4493 LOCK_USED_IN_SOFTIRQ_READ))
4494 return 0;
4495 } else {
4496 if (lockdep_hardirq_context())
4497 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
4498 return 0;
4499 if (curr->softirq_context)
4500 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
4501 return 0;
4502 }
4503 }
4504 if (!hlock->hardirqs_off) {
4505 if (hlock->read) {
4506 if (!mark_lock(curr, hlock,
4507 LOCK_ENABLED_HARDIRQ_READ))
4508 return 0;
4509 if (curr->softirqs_enabled)
4510 if (!mark_lock(curr, hlock,
4511 LOCK_ENABLED_SOFTIRQ_READ))
4512 return 0;
4513 } else {
4514 if (!mark_lock(curr, hlock,
4515 LOCK_ENABLED_HARDIRQ))
4516 return 0;
4517 if (curr->softirqs_enabled)
4518 if (!mark_lock(curr, hlock,
4519 LOCK_ENABLED_SOFTIRQ))
4520 return 0;
4521 }
4522 }
4523
4524lock_used:
4525
4526 if (!mark_lock(curr, hlock, LOCK_USED))
4527 return 0;
4528
4529 return 1;
4530}
4531
4532static inline unsigned int task_irq_context(struct task_struct *task)
4533{
4534 return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
4535 LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
4536}
4537
4538static int separate_irq_context(struct task_struct *curr,
4539 struct held_lock *hlock)
4540{
4541 unsigned int depth = curr->lockdep_depth;
4542
4543
4544
4545
4546 if (depth) {
4547 struct held_lock *prev_hlock;
4548
4549 prev_hlock = curr->held_locks + depth-1;
4550
4551
4552
4553
4554
4555 if (prev_hlock->irq_context != hlock->irq_context)
4556 return 1;
4557 }
4558 return 0;
4559}
4560
4561
4562
4563
4564static int mark_lock(struct task_struct *curr, struct held_lock *this,
4565 enum lock_usage_bit new_bit)
4566{
4567 unsigned int new_mask, ret = 1;
4568
4569 if (new_bit >= LOCK_USAGE_STATES) {
4570 DEBUG_LOCKS_WARN_ON(1);
4571 return 0;
4572 }
4573
4574 if (new_bit == LOCK_USED && this->read)
4575 new_bit = LOCK_USED_READ;
4576
4577 new_mask = 1 << new_bit;
4578
4579
4580
4581
4582
4583 if (likely(hlock_class(this)->usage_mask & new_mask))
4584 return 1;
4585
4586 if (!graph_lock())
4587 return 0;
4588
4589
4590
4591 if (unlikely(hlock_class(this)->usage_mask & new_mask))
4592 goto unlock;
4593
4594 if (!hlock_class(this)->usage_mask)
4595 debug_atomic_dec(nr_unused_locks);
4596
4597 hlock_class(this)->usage_mask |= new_mask;
4598
4599 if (new_bit < LOCK_TRACE_STATES) {
4600 if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
4601 return 0;
4602 }
4603
4604 if (new_bit < LOCK_USED) {
4605 ret = mark_lock_irq(curr, this, new_bit);
4606 if (!ret)
4607 return 0;
4608 }
4609
4610unlock:
4611 graph_unlock();
4612
4613
4614
4615
4616 if (ret == 2) {
4617 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
4618 print_lock(this);
4619 print_irqtrace_events(curr);
4620 dump_stack();
4621 }
4622
4623 return ret;
4624}
4625
4626static inline short task_wait_context(struct task_struct *curr)
4627{
4628
4629
4630
4631
4632 if (lockdep_hardirq_context()) {
4633
4634
4635
4636 if (curr->hardirq_threaded || curr->irq_config)
4637 return LD_WAIT_CONFIG;
4638
4639 return LD_WAIT_SPIN;
4640 } else if (curr->softirq_context) {
4641
4642
4643
4644 return LD_WAIT_CONFIG;
4645 }
4646
4647 return LD_WAIT_MAX;
4648}
4649
4650static int
4651print_lock_invalid_wait_context(struct task_struct *curr,
4652 struct held_lock *hlock)
4653{
4654 short curr_inner;
4655
4656 if (!debug_locks_off())
4657 return 0;
4658 if (debug_locks_silent)
4659 return 0;
4660
4661 pr_warn("\n");
4662 pr_warn("=============================\n");
4663 pr_warn("[ BUG: Invalid wait context ]\n");
4664 print_kernel_ident();
4665 pr_warn("-----------------------------\n");
4666
4667 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
4668 print_lock(hlock);
4669
4670 pr_warn("other info that might help us debug this:\n");
4671
4672 curr_inner = task_wait_context(curr);
4673 pr_warn("context-{%d:%d}\n", curr_inner, curr_inner);
4674
4675 lockdep_print_held_locks(curr);
4676
4677 pr_warn("stack backtrace:\n");
4678 dump_stack();
4679
4680 return 0;
4681}
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698static int check_wait_context(struct task_struct *curr, struct held_lock *next)
4699{
4700 u8 next_inner = hlock_class(next)->wait_type_inner;
4701 u8 next_outer = hlock_class(next)->wait_type_outer;
4702 u8 curr_inner;
4703 int depth;
4704
4705 if (!next_inner || next->trylock)
4706 return 0;
4707
4708 if (!next_outer)
4709 next_outer = next_inner;
4710
4711
4712
4713
4714 for (depth = curr->lockdep_depth - 1; depth >= 0; depth--) {
4715 struct held_lock *prev = curr->held_locks + depth;
4716 if (prev->irq_context != next->irq_context)
4717 break;
4718 }
4719 depth++;
4720
4721 curr_inner = task_wait_context(curr);
4722
4723 for (; depth < curr->lockdep_depth; depth++) {
4724 struct held_lock *prev = curr->held_locks + depth;
4725 u8 prev_inner = hlock_class(prev)->wait_type_inner;
4726
4727 if (prev_inner) {
4728
4729
4730
4731
4732
4733
4734 curr_inner = min(curr_inner, prev_inner);
4735 }
4736 }
4737
4738 if (next_outer > curr_inner)
4739 return print_lock_invalid_wait_context(curr, next);
4740
4741 return 0;
4742}
4743
4744#else
4745
4746static inline int
4747mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4748{
4749 return 1;
4750}
4751
4752static inline unsigned int task_irq_context(struct task_struct *task)
4753{
4754 return 0;
4755}
4756
4757static inline int separate_irq_context(struct task_struct *curr,
4758 struct held_lock *hlock)
4759{
4760 return 0;
4761}
4762
4763static inline int check_wait_context(struct task_struct *curr,
4764 struct held_lock *next)
4765{
4766 return 0;
4767}
4768
4769#endif
4770
4771
4772
4773
4774void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
4775 struct lock_class_key *key, int subclass,
4776 u8 inner, u8 outer, u8 lock_type)
4777{
4778 int i;
4779
4780 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
4781 lock->class_cache[i] = NULL;
4782
4783#ifdef CONFIG_LOCK_STAT
4784 lock->cpu = raw_smp_processor_id();
4785#endif
4786
4787
4788
4789
4790 if (DEBUG_LOCKS_WARN_ON(!name)) {
4791 lock->name = "NULL";
4792 return;
4793 }
4794
4795 lock->name = name;
4796
4797 lock->wait_type_outer = outer;
4798 lock->wait_type_inner = inner;
4799 lock->lock_type = lock_type;
4800
4801
4802
4803
4804 if (DEBUG_LOCKS_WARN_ON(!key))
4805 return;
4806
4807
4808
4809
4810 if (!static_obj(key) && !is_dynamic_key(key)) {
4811 if (debug_locks)
4812 printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
4813 DEBUG_LOCKS_WARN_ON(1);
4814 return;
4815 }
4816 lock->key = key;
4817
4818 if (unlikely(!debug_locks))
4819 return;
4820
4821 if (subclass) {
4822 unsigned long flags;
4823
4824 if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
4825 return;
4826
4827 raw_local_irq_save(flags);
4828 lockdep_recursion_inc();
4829 register_lock_class(lock, subclass, 1);
4830 lockdep_recursion_finish();
4831 raw_local_irq_restore(flags);
4832 }
4833}
4834EXPORT_SYMBOL_GPL(lockdep_init_map_type);
4835
4836struct lock_class_key __lockdep_no_validate__;
4837EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
4838
4839static void
4840print_lock_nested_lock_not_held(struct task_struct *curr,
4841 struct held_lock *hlock,
4842 unsigned long ip)
4843{
4844 if (!debug_locks_off())
4845 return;
4846 if (debug_locks_silent)
4847 return;
4848
4849 pr_warn("\n");
4850 pr_warn("==================================\n");
4851 pr_warn("WARNING: Nested lock was not taken\n");
4852 print_kernel_ident();
4853 pr_warn("----------------------------------\n");
4854
4855 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
4856 print_lock(hlock);
4857
4858 pr_warn("\nbut this task is not holding:\n");
4859 pr_warn("%s\n", hlock->nest_lock->name);
4860
4861 pr_warn("\nstack backtrace:\n");
4862 dump_stack();
4863
4864 pr_warn("\nother info that might help us debug this:\n");
4865 lockdep_print_held_locks(curr);
4866
4867 pr_warn("\nstack backtrace:\n");
4868 dump_stack();
4869}
4870
4871static int __lock_is_held(const struct lockdep_map *lock, int read);
4872
4873
4874
4875
4876
4877
4878
4879
4880
4881static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
4882 int trylock, int read, int check, int hardirqs_off,
4883 struct lockdep_map *nest_lock, unsigned long ip,
4884 int references, int pin_count)
4885{
4886 struct task_struct *curr = current;
4887 struct lock_class *class = NULL;
4888 struct held_lock *hlock;
4889 unsigned int depth;
4890 int chain_head = 0;
4891 int class_idx;
4892 u64 chain_key;
4893
4894 if (unlikely(!debug_locks))
4895 return 0;
4896
4897 if (!prove_locking || lock->key == &__lockdep_no_validate__)
4898 check = 0;
4899
4900 if (subclass < NR_LOCKDEP_CACHING_CLASSES)
4901 class = lock->class_cache[subclass];
4902
4903
4904
4905 if (unlikely(!class)) {
4906 class = register_lock_class(lock, subclass, 0);
4907 if (!class)
4908 return 0;
4909 }
4910
4911 debug_class_ops_inc(class);
4912
4913 if (very_verbose(class)) {
4914 printk("\nacquire class [%px] %s", class->key, class->name);
4915 if (class->name_version > 1)
4916 printk(KERN_CONT "#%d", class->name_version);
4917 printk(KERN_CONT "\n");
4918 dump_stack();
4919 }
4920
4921
4922
4923
4924
4925
4926 depth = curr->lockdep_depth;
4927
4928
4929
4930 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
4931 return 0;
4932
4933 class_idx = class - lock_classes;
4934
4935 if (depth) {
4936 hlock = curr->held_locks + depth - 1;
4937 if (hlock->class_idx == class_idx && nest_lock) {
4938 if (!references)
4939 references++;
4940
4941 if (!hlock->references)
4942 hlock->references++;
4943
4944 hlock->references += references;
4945
4946
4947 if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
4948 return 0;
4949
4950 return 2;
4951 }
4952 }
4953
4954 hlock = curr->held_locks + depth;
4955
4956
4957
4958
4959 if (DEBUG_LOCKS_WARN_ON(!class))
4960 return 0;
4961 hlock->class_idx = class_idx;
4962 hlock->acquire_ip = ip;
4963 hlock->instance = lock;
4964 hlock->nest_lock = nest_lock;
4965 hlock->irq_context = task_irq_context(curr);
4966 hlock->trylock = trylock;
4967 hlock->read = read;
4968 hlock->check = check;
4969 hlock->hardirqs_off = !!hardirqs_off;
4970 hlock->references = references;
4971#ifdef CONFIG_LOCK_STAT
4972 hlock->waittime_stamp = 0;
4973 hlock->holdtime_stamp = lockstat_clock();
4974#endif
4975 hlock->pin_count = pin_count;
4976
4977 if (check_wait_context(curr, hlock))
4978 return 0;
4979
4980
4981 if (!mark_usage(curr, hlock, check))
4982 return 0;
4983
4984
4985
4986
4987
4988
4989
4990
4991
4992
4993
4994
4995
4996
4997 if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use)))
4998 return 0;
4999
5000 chain_key = curr->curr_chain_key;
5001 if (!depth) {
5002
5003
5004
5005 if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
5006 return 0;
5007 chain_head = 1;
5008 }
5009
5010 hlock->prev_chain_key = chain_key;
5011 if (separate_irq_context(curr, hlock)) {
5012 chain_key = INITIAL_CHAIN_KEY;
5013 chain_head = 1;
5014 }
5015 chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
5016
5017 if (nest_lock && !__lock_is_held(nest_lock, -1)) {
5018 print_lock_nested_lock_not_held(curr, hlock, ip);
5019 return 0;
5020 }
5021
5022 if (!debug_locks_silent) {
5023 WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
5024 WARN_ON_ONCE(!hlock_class(hlock)->key);
5025 }
5026
5027 if (!validate_chain(curr, hlock, chain_head, chain_key))
5028 return 0;
5029
5030 curr->curr_chain_key = chain_key;
5031 curr->lockdep_depth++;
5032 check_chain_key(curr);
5033#ifdef CONFIG_DEBUG_LOCKDEP
5034 if (unlikely(!debug_locks))
5035 return 0;
5036#endif
5037 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
5038 debug_locks_off();
5039 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
5040 printk(KERN_DEBUG "depth: %i max: %lu!\n",
5041 curr->lockdep_depth, MAX_LOCK_DEPTH);
5042
5043 lockdep_print_held_locks(current);
5044 debug_show_all_locks();
5045 dump_stack();
5046
5047 return 0;
5048 }
5049
5050 if (unlikely(curr->lockdep_depth > max_lockdep_depth))
5051 max_lockdep_depth = curr->lockdep_depth;
5052
5053 return 1;
5054}
5055
5056static void print_unlock_imbalance_bug(struct task_struct *curr,
5057 struct lockdep_map *lock,
5058 unsigned long ip)
5059{
5060 if (!debug_locks_off())
5061 return;
5062 if (debug_locks_silent)
5063 return;
5064
5065 pr_warn("\n");
5066 pr_warn("=====================================\n");
5067 pr_warn("WARNING: bad unlock balance detected!\n");
5068 print_kernel_ident();
5069 pr_warn("-------------------------------------\n");
5070 pr_warn("%s/%d is trying to release lock (",
5071 curr->comm, task_pid_nr(curr));
5072 print_lockdep_cache(lock);
5073 pr_cont(") at:\n");
5074 print_ip_sym(KERN_WARNING, ip);
5075 pr_warn("but there are no more locks to release!\n");
5076 pr_warn("\nother info that might help us debug this:\n");
5077 lockdep_print_held_locks(curr);
5078
5079 pr_warn("\nstack backtrace:\n");
5080 dump_stack();
5081}
5082
5083static noinstr int match_held_lock(const struct held_lock *hlock,
5084 const struct lockdep_map *lock)
5085{
5086 if (hlock->instance == lock)
5087 return 1;
5088
5089 if (hlock->references) {
5090 const struct lock_class *class = lock->class_cache[0];
5091
5092 if (!class)
5093 class = look_up_lock_class(lock, 0);
5094
5095
5096
5097
5098
5099
5100
5101 if (!class)
5102 return 0;
5103
5104
5105
5106
5107
5108
5109 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
5110 return 0;
5111
5112 if (hlock->class_idx == class - lock_classes)
5113 return 1;
5114 }
5115
5116 return 0;
5117}
5118
5119
5120static struct held_lock *find_held_lock(struct task_struct *curr,
5121 struct lockdep_map *lock,
5122 unsigned int depth, int *idx)
5123{
5124 struct held_lock *ret, *hlock, *prev_hlock;
5125 int i;
5126
5127 i = depth - 1;
5128 hlock = curr->held_locks + i;
5129 ret = hlock;
5130 if (match_held_lock(hlock, lock))
5131 goto out;
5132
5133 ret = NULL;
5134 for (i--, prev_hlock = hlock--;
5135 i >= 0;
5136 i--, prev_hlock = hlock--) {
5137
5138
5139
5140 if (prev_hlock->irq_context != hlock->irq_context) {
5141 ret = NULL;
5142 break;
5143 }
5144 if (match_held_lock(hlock, lock)) {
5145 ret = hlock;
5146 break;
5147 }
5148 }
5149
5150out:
5151 *idx = i;
5152 return ret;
5153}
5154
5155static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
5156 int idx, unsigned int *merged)
5157{
5158 struct held_lock *hlock;
5159 int first_idx = idx;
5160
5161 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
5162 return 0;
5163
5164 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
5165 switch (__lock_acquire(hlock->instance,
5166 hlock_class(hlock)->subclass,
5167 hlock->trylock,
5168 hlock->read, hlock->check,
5169 hlock->hardirqs_off,
5170 hlock->nest_lock, hlock->acquire_ip,
5171 hlock->references, hlock->pin_count)) {
5172 case 0:
5173 return 1;
5174 case 1:
5175 break;
5176 case 2:
5177 *merged += (idx == first_idx);
5178 break;
5179 default:
5180 WARN_ON(1);
5181 return 0;
5182 }
5183 }
5184 return 0;
5185}
5186
5187static int
5188__lock_set_class(struct lockdep_map *lock, const char *name,
5189 struct lock_class_key *key, unsigned int subclass,
5190 unsigned long ip)
5191{
5192 struct task_struct *curr = current;
5193 unsigned int depth, merged = 0;
5194 struct held_lock *hlock;
5195 struct lock_class *class;
5196 int i;
5197
5198 if (unlikely(!debug_locks))
5199 return 0;
5200
5201 depth = curr->lockdep_depth;
5202
5203
5204
5205
5206 if (DEBUG_LOCKS_WARN_ON(!depth))
5207 return 0;
5208
5209 hlock = find_held_lock(curr, lock, depth, &i);
5210 if (!hlock) {
5211 print_unlock_imbalance_bug(curr, lock, ip);
5212 return 0;
5213 }
5214
5215 lockdep_init_map_waits(lock, name, key, 0,
5216 lock->wait_type_inner,
5217 lock->wait_type_outer);
5218 class = register_lock_class(lock, subclass, 0);
5219 hlock->class_idx = class - lock_classes;
5220
5221 curr->lockdep_depth = i;
5222 curr->curr_chain_key = hlock->prev_chain_key;
5223
5224 if (reacquire_held_locks(curr, depth, i, &merged))
5225 return 0;
5226
5227
5228
5229
5230
5231 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
5232 return 0;
5233 return 1;
5234}
5235
5236static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
5237{
5238 struct task_struct *curr = current;
5239 unsigned int depth, merged = 0;
5240 struct held_lock *hlock;
5241 int i;
5242
5243 if (unlikely(!debug_locks))
5244 return 0;
5245
5246 depth = curr->lockdep_depth;
5247
5248
5249
5250
5251 if (DEBUG_LOCKS_WARN_ON(!depth))
5252 return 0;
5253
5254 hlock = find_held_lock(curr, lock, depth, &i);
5255 if (!hlock) {
5256 print_unlock_imbalance_bug(curr, lock, ip);
5257 return 0;
5258 }
5259
5260 curr->lockdep_depth = i;
5261 curr->curr_chain_key = hlock->prev_chain_key;
5262
5263 WARN(hlock->read, "downgrading a read lock");
5264 hlock->read = 1;
5265 hlock->acquire_ip = ip;
5266
5267 if (reacquire_held_locks(curr, depth, i, &merged))
5268 return 0;
5269
5270
5271 if (DEBUG_LOCKS_WARN_ON(merged))
5272 return 0;
5273
5274
5275
5276
5277
5278 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
5279 return 0;
5280
5281 return 1;
5282}
5283
5284
5285
5286
5287
5288
5289static int
5290__lock_release(struct lockdep_map *lock, unsigned long ip)
5291{
5292 struct task_struct *curr = current;
5293 unsigned int depth, merged = 1;
5294 struct held_lock *hlock;
5295 int i;
5296
5297 if (unlikely(!debug_locks))
5298 return 0;
5299
5300 depth = curr->lockdep_depth;
5301
5302
5303
5304
5305 if (depth <= 0) {
5306 print_unlock_imbalance_bug(curr, lock, ip);
5307 return 0;
5308 }
5309
5310
5311
5312
5313
5314 hlock = find_held_lock(curr, lock, depth, &i);
5315 if (!hlock) {
5316 print_unlock_imbalance_bug(curr, lock, ip);
5317 return 0;
5318 }
5319
5320 if (hlock->instance == lock)
5321 lock_release_holdtime(hlock);
5322
5323 WARN(hlock->pin_count, "releasing a pinned lock\n");
5324
5325 if (hlock->references) {
5326 hlock->references--;
5327 if (hlock->references) {
5328
5329
5330
5331
5332
5333 return 1;
5334 }
5335 }
5336
5337
5338
5339
5340
5341
5342
5343 curr->lockdep_depth = i;
5344 curr->curr_chain_key = hlock->prev_chain_key;
5345
5346
5347
5348
5349
5350 if (i == depth-1)
5351 return 1;
5352
5353 if (reacquire_held_locks(curr, depth, i + 1, &merged))
5354 return 0;
5355
5356
5357
5358
5359
5360
5361 DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);
5362
5363
5364
5365
5366
5367
5368 return 0;
5369}
5370
5371static __always_inline
5372int __lock_is_held(const struct lockdep_map *lock, int read)
5373{
5374 struct task_struct *curr = current;
5375 int i;
5376
5377 for (i = 0; i < curr->lockdep_depth; i++) {
5378 struct held_lock *hlock = curr->held_locks + i;
5379
5380 if (match_held_lock(hlock, lock)) {
5381 if (read == -1 || !!hlock->read == read)
5382 return LOCK_STATE_HELD;
5383
5384 return LOCK_STATE_NOT_HELD;
5385 }
5386 }
5387
5388 return LOCK_STATE_NOT_HELD;
5389}
5390
5391static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
5392{
5393 struct pin_cookie cookie = NIL_COOKIE;
5394 struct task_struct *curr = current;
5395 int i;
5396
5397 if (unlikely(!debug_locks))
5398 return cookie;
5399
5400 for (i = 0; i < curr->lockdep_depth; i++) {
5401 struct held_lock *hlock = curr->held_locks + i;
5402
5403 if (match_held_lock(hlock, lock)) {
5404
5405
5406
5407
5408
5409 cookie.val = 1 + (prandom_u32() >> 16);
5410 hlock->pin_count += cookie.val;
5411 return cookie;
5412 }
5413 }
5414
5415 WARN(1, "pinning an unheld lock\n");
5416 return cookie;
5417}
5418
5419static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5420{
5421 struct task_struct *curr = current;
5422 int i;
5423
5424 if (unlikely(!debug_locks))
5425 return;
5426
5427 for (i = 0; i < curr->lockdep_depth; i++) {
5428 struct held_lock *hlock = curr->held_locks + i;
5429
5430 if (match_held_lock(hlock, lock)) {
5431 hlock->pin_count += cookie.val;
5432 return;
5433 }
5434 }
5435
5436 WARN(1, "pinning an unheld lock\n");
5437}
5438
5439static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5440{
5441 struct task_struct *curr = current;
5442 int i;
5443
5444 if (unlikely(!debug_locks))
5445 return;
5446
5447 for (i = 0; i < curr->lockdep_depth; i++) {
5448 struct held_lock *hlock = curr->held_locks + i;
5449
5450 if (match_held_lock(hlock, lock)) {
5451 if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
5452 return;
5453
5454 hlock->pin_count -= cookie.val;
5455
5456 if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
5457 hlock->pin_count = 0;
5458
5459 return;
5460 }
5461 }
5462
5463 WARN(1, "unpinning an unheld lock\n");
5464}
5465
5466
5467
5468
5469static noinstr void check_flags(unsigned long flags)
5470{
5471#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
5472 if (!debug_locks)
5473 return;
5474
5475
5476 instrumentation_begin();
5477
5478 if (irqs_disabled_flags(flags)) {
5479 if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
5480 printk("possible reason: unannotated irqs-off.\n");
5481 }
5482 } else {
5483 if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
5484 printk("possible reason: unannotated irqs-on.\n");
5485 }
5486 }
5487
5488#ifndef CONFIG_PREEMPT_RT
5489
5490
5491
5492
5493
5494 if (!hardirq_count()) {
5495 if (softirq_count()) {
5496
5497 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
5498 } else {
5499
5500 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
5501 }
5502 }
5503#endif
5504
5505 if (!debug_locks)
5506 print_irqtrace_events(current);
5507
5508 instrumentation_end();
5509#endif
5510}
5511
5512void lock_set_class(struct lockdep_map *lock, const char *name,
5513 struct lock_class_key *key, unsigned int subclass,
5514 unsigned long ip)
5515{
5516 unsigned long flags;
5517
5518 if (unlikely(!lockdep_enabled()))
5519 return;
5520
5521 raw_local_irq_save(flags);
5522 lockdep_recursion_inc();
5523 check_flags(flags);
5524 if (__lock_set_class(lock, name, key, subclass, ip))
5525 check_chain_key(current);
5526 lockdep_recursion_finish();
5527 raw_local_irq_restore(flags);
5528}
5529EXPORT_SYMBOL_GPL(lock_set_class);
5530
5531void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
5532{
5533 unsigned long flags;
5534
5535 if (unlikely(!lockdep_enabled()))
5536 return;
5537
5538 raw_local_irq_save(flags);
5539 lockdep_recursion_inc();
5540 check_flags(flags);
5541 if (__lock_downgrade(lock, ip))
5542 check_chain_key(current);
5543 lockdep_recursion_finish();
5544 raw_local_irq_restore(flags);
5545}
5546EXPORT_SYMBOL_GPL(lock_downgrade);
5547
5548
5549static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock, int subclass)
5550{
5551#ifdef CONFIG_PROVE_LOCKING
5552 struct lock_class *class = look_up_lock_class(lock, subclass);
5553 unsigned long mask = LOCKF_USED;
5554
5555
5556 if (!class)
5557 return;
5558
5559
5560
5561
5562
5563 if (!hlock->read)
5564 mask |= LOCKF_USED_READ;
5565
5566 if (!(class->usage_mask & mask))
5567 return;
5568
5569 hlock->class_idx = class - lock_classes;
5570
5571 print_usage_bug(current, hlock, LOCK_USED, LOCK_USAGE_STATES);
5572#endif
5573}
5574
5575static bool lockdep_nmi(void)
5576{
5577 if (raw_cpu_read(lockdep_recursion))
5578 return false;
5579
5580 if (!in_nmi())
5581 return false;
5582
5583 return true;
5584}
5585
5586
5587
5588
5589
5590
5591
5592bool read_lock_is_recursive(void)
5593{
5594 return force_read_lock_recursive ||
5595 !IS_ENABLED(CONFIG_QUEUED_RWLOCKS) ||
5596 in_interrupt();
5597}
5598EXPORT_SYMBOL_GPL(read_lock_is_recursive);
5599
5600
5601
5602
5603
5604void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
5605 int trylock, int read, int check,
5606 struct lockdep_map *nest_lock, unsigned long ip)
5607{
5608 unsigned long flags;
5609
5610 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
5611
5612 if (!debug_locks)
5613 return;
5614
5615 if (unlikely(!lockdep_enabled())) {
5616
5617 if (lockdep_nmi() && !trylock) {
5618 struct held_lock hlock;
5619
5620 hlock.acquire_ip = ip;
5621 hlock.instance = lock;
5622 hlock.nest_lock = nest_lock;
5623 hlock.irq_context = 2;
5624 hlock.trylock = trylock;
5625 hlock.read = read;
5626 hlock.check = check;
5627 hlock.hardirqs_off = true;
5628 hlock.references = 0;
5629
5630 verify_lock_unused(lock, &hlock, subclass);
5631 }
5632 return;
5633 }
5634
5635 raw_local_irq_save(flags);
5636 check_flags(flags);
5637
5638 lockdep_recursion_inc();
5639 __lock_acquire(lock, subclass, trylock, read, check,
5640 irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
5641 lockdep_recursion_finish();
5642 raw_local_irq_restore(flags);
5643}
5644EXPORT_SYMBOL_GPL(lock_acquire);
5645
5646void lock_release(struct lockdep_map *lock, unsigned long ip)
5647{
5648 unsigned long flags;
5649
5650 trace_lock_release(lock, ip);
5651
5652 if (unlikely(!lockdep_enabled()))
5653 return;
5654
5655 raw_local_irq_save(flags);
5656 check_flags(flags);
5657
5658 lockdep_recursion_inc();
5659 if (__lock_release(lock, ip))
5660 check_chain_key(current);
5661 lockdep_recursion_finish();
5662 raw_local_irq_restore(flags);
5663}
5664EXPORT_SYMBOL_GPL(lock_release);
5665
5666noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
5667{
5668 unsigned long flags;
5669 int ret = LOCK_STATE_NOT_HELD;
5670
5671
5672
5673
5674
5675 if (unlikely(!lockdep_enabled()))
5676 return LOCK_STATE_UNKNOWN;
5677
5678 raw_local_irq_save(flags);
5679 check_flags(flags);
5680
5681 lockdep_recursion_inc();
5682 ret = __lock_is_held(lock, read);
5683 lockdep_recursion_finish();
5684 raw_local_irq_restore(flags);
5685
5686 return ret;
5687}
5688EXPORT_SYMBOL_GPL(lock_is_held_type);
5689NOKPROBE_SYMBOL(lock_is_held_type);
5690
5691struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
5692{
5693 struct pin_cookie cookie = NIL_COOKIE;
5694 unsigned long flags;
5695
5696 if (unlikely(!lockdep_enabled()))
5697 return cookie;
5698
5699 raw_local_irq_save(flags);
5700 check_flags(flags);
5701
5702 lockdep_recursion_inc();
5703 cookie = __lock_pin_lock(lock);
5704 lockdep_recursion_finish();
5705 raw_local_irq_restore(flags);
5706
5707 return cookie;
5708}
5709EXPORT_SYMBOL_GPL(lock_pin_lock);
5710
5711void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5712{
5713 unsigned long flags;
5714
5715 if (unlikely(!lockdep_enabled()))
5716 return;
5717
5718 raw_local_irq_save(flags);
5719 check_flags(flags);
5720
5721 lockdep_recursion_inc();
5722 __lock_repin_lock(lock, cookie);
5723 lockdep_recursion_finish();
5724 raw_local_irq_restore(flags);
5725}
5726EXPORT_SYMBOL_GPL(lock_repin_lock);
5727
5728void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5729{
5730 unsigned long flags;
5731
5732 if (unlikely(!lockdep_enabled()))
5733 return;
5734
5735 raw_local_irq_save(flags);
5736 check_flags(flags);
5737
5738 lockdep_recursion_inc();
5739 __lock_unpin_lock(lock, cookie);
5740 lockdep_recursion_finish();
5741 raw_local_irq_restore(flags);
5742}
5743EXPORT_SYMBOL_GPL(lock_unpin_lock);
5744
5745#ifdef CONFIG_LOCK_STAT
5746static void print_lock_contention_bug(struct task_struct *curr,
5747 struct lockdep_map *lock,
5748 unsigned long ip)
5749{
5750 if (!debug_locks_off())
5751 return;
5752 if (debug_locks_silent)
5753 return;
5754
5755 pr_warn("\n");
5756 pr_warn("=================================\n");
5757 pr_warn("WARNING: bad contention detected!\n");
5758 print_kernel_ident();
5759 pr_warn("---------------------------------\n");
5760 pr_warn("%s/%d is trying to contend lock (",
5761 curr->comm, task_pid_nr(curr));
5762 print_lockdep_cache(lock);
5763 pr_cont(") at:\n");
5764 print_ip_sym(KERN_WARNING, ip);
5765 pr_warn("but there are no locks held!\n");
5766 pr_warn("\nother info that might help us debug this:\n");
5767 lockdep_print_held_locks(curr);
5768
5769 pr_warn("\nstack backtrace:\n");
5770 dump_stack();
5771}
5772
5773static void
5774__lock_contended(struct lockdep_map *lock, unsigned long ip)
5775{
5776 struct task_struct *curr = current;
5777 struct held_lock *hlock;
5778 struct lock_class_stats *stats;
5779 unsigned int depth;
5780 int i, contention_point, contending_point;
5781
5782 depth = curr->lockdep_depth;
5783
5784
5785
5786
5787 if (DEBUG_LOCKS_WARN_ON(!depth))
5788 return;
5789
5790 hlock = find_held_lock(curr, lock, depth, &i);
5791 if (!hlock) {
5792 print_lock_contention_bug(curr, lock, ip);
5793 return;
5794 }
5795
5796 if (hlock->instance != lock)
5797 return;
5798
5799 hlock->waittime_stamp = lockstat_clock();
5800
5801 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
5802 contending_point = lock_point(hlock_class(hlock)->contending_point,
5803 lock->ip);
5804
5805 stats = get_lock_stats(hlock_class(hlock));
5806 if (contention_point < LOCKSTAT_POINTS)
5807 stats->contention_point[contention_point]++;
5808 if (contending_point < LOCKSTAT_POINTS)
5809 stats->contending_point[contending_point]++;
5810 if (lock->cpu != smp_processor_id())
5811 stats->bounces[bounce_contended + !!hlock->read]++;
5812}
5813
5814static void
5815__lock_acquired(struct lockdep_map *lock, unsigned long ip)
5816{
5817 struct task_struct *curr = current;
5818 struct held_lock *hlock;
5819 struct lock_class_stats *stats;
5820 unsigned int depth;
5821 u64 now, waittime = 0;
5822 int i, cpu;
5823
5824 depth = curr->lockdep_depth;
5825
5826
5827
5828
5829 if (DEBUG_LOCKS_WARN_ON(!depth))
5830 return;
5831
5832 hlock = find_held_lock(curr, lock, depth, &i);
5833 if (!hlock) {
5834 print_lock_contention_bug(curr, lock, _RET_IP_);
5835 return;
5836 }
5837
5838 if (hlock->instance != lock)
5839 return;
5840
5841 cpu = smp_processor_id();
5842 if (hlock->waittime_stamp) {
5843 now = lockstat_clock();
5844 waittime = now - hlock->waittime_stamp;
5845 hlock->holdtime_stamp = now;
5846 }
5847
5848 stats = get_lock_stats(hlock_class(hlock));
5849 if (waittime) {
5850 if (hlock->read)
5851 lock_time_inc(&stats->read_waittime, waittime);
5852 else
5853 lock_time_inc(&stats->write_waittime, waittime);
5854 }
5855 if (lock->cpu != cpu)
5856 stats->bounces[bounce_acquired + !!hlock->read]++;
5857
5858 lock->cpu = cpu;
5859 lock->ip = ip;
5860}
5861
5862void lock_contended(struct lockdep_map *lock, unsigned long ip)
5863{
5864 unsigned long flags;
5865
5866 trace_lock_contended(lock, ip);
5867
5868 if (unlikely(!lock_stat || !lockdep_enabled()))
5869 return;
5870
5871 raw_local_irq_save(flags);
5872 check_flags(flags);
5873 lockdep_recursion_inc();
5874 __lock_contended(lock, ip);
5875 lockdep_recursion_finish();
5876 raw_local_irq_restore(flags);
5877}
5878EXPORT_SYMBOL_GPL(lock_contended);
5879
5880void lock_acquired(struct lockdep_map *lock, unsigned long ip)
5881{
5882 unsigned long flags;
5883
5884 trace_lock_acquired(lock, ip);
5885
5886 if (unlikely(!lock_stat || !lockdep_enabled()))
5887 return;
5888
5889 raw_local_irq_save(flags);
5890 check_flags(flags);
5891 lockdep_recursion_inc();
5892 __lock_acquired(lock, ip);
5893 lockdep_recursion_finish();
5894 raw_local_irq_restore(flags);
5895}
5896EXPORT_SYMBOL_GPL(lock_acquired);
5897#endif
5898
5899
5900
5901
5902
5903
5904void lockdep_reset(void)
5905{
5906 unsigned long flags;
5907 int i;
5908
5909 raw_local_irq_save(flags);
5910 lockdep_init_task(current);
5911 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
5912 nr_hardirq_chains = 0;
5913 nr_softirq_chains = 0;
5914 nr_process_chains = 0;
5915 debug_locks = 1;
5916 for (i = 0; i < CHAINHASH_SIZE; i++)
5917 INIT_HLIST_HEAD(chainhash_table + i);
5918 raw_local_irq_restore(flags);
5919}
5920
5921
5922static void remove_class_from_lock_chain(struct pending_free *pf,
5923 struct lock_chain *chain,
5924 struct lock_class *class)
5925{
5926#ifdef CONFIG_PROVE_LOCKING
5927 int i;
5928
5929 for (i = chain->base; i < chain->base + chain->depth; i++) {
5930 if (chain_hlock_class_idx(chain_hlocks[i]) != class - lock_classes)
5931 continue;
5932
5933
5934
5935
5936 goto free_lock_chain;
5937 }
5938
5939 return;
5940
5941free_lock_chain:
5942 free_chain_hlocks(chain->base, chain->depth);
5943
5944 WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY);
5945 dec_chains(chain->irq_context);
5946
5947
5948
5949
5950
5951 hlist_del_rcu(&chain->entry);
5952 __set_bit(chain - lock_chains, pf->lock_chains_being_freed);
5953 nr_zapped_lock_chains++;
5954#endif
5955}
5956
5957
5958static void remove_class_from_lock_chains(struct pending_free *pf,
5959 struct lock_class *class)
5960{
5961 struct lock_chain *chain;
5962 struct hlist_head *head;
5963 int i;
5964
5965 for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
5966 head = chainhash_table + i;
5967 hlist_for_each_entry_rcu(chain, head, entry) {
5968 remove_class_from_lock_chain(pf, chain, class);
5969 }
5970 }
5971}
5972
5973
5974
5975
5976static void zap_class(struct pending_free *pf, struct lock_class *class)
5977{
5978 struct lock_list *entry;
5979 int i;
5980
5981 WARN_ON_ONCE(!class->key);
5982
5983
5984
5985
5986
5987 for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
5988 entry = list_entries + i;
5989 if (entry->class != class && entry->links_to != class)
5990 continue;
5991 __clear_bit(i, list_entries_in_use);
5992 nr_list_entries--;
5993 list_del_rcu(&entry->entry);
5994 }
5995 if (list_empty(&class->locks_after) &&
5996 list_empty(&class->locks_before)) {
5997 list_move_tail(&class->lock_entry, &pf->zapped);
5998 hlist_del_rcu(&class->hash_entry);
5999 WRITE_ONCE(class->key, NULL);
6000 WRITE_ONCE(class->name, NULL);
6001 nr_lock_classes--;
6002 __clear_bit(class - lock_classes, lock_classes_in_use);
6003 } else {
6004 WARN_ONCE(true, "%s() failed for class %s\n", __func__,
6005 class->name);
6006 }
6007
6008 remove_class_from_lock_chains(pf, class);
6009 nr_zapped_classes++;
6010}
6011
6012static void reinit_class(struct lock_class *class)
6013{
6014 void *const p = class;
6015 const unsigned int offset = offsetof(struct lock_class, key);
6016
6017 WARN_ON_ONCE(!class->lock_entry.next);
6018 WARN_ON_ONCE(!list_empty(&class->locks_after));
6019 WARN_ON_ONCE(!list_empty(&class->locks_before));
6020 memset(p + offset, 0, sizeof(*class) - offset);
6021 WARN_ON_ONCE(!class->lock_entry.next);
6022 WARN_ON_ONCE(!list_empty(&class->locks_after));
6023 WARN_ON_ONCE(!list_empty(&class->locks_before));
6024}
6025
6026static inline int within(const void *addr, void *start, unsigned long size)
6027{
6028 return addr >= start && addr < start + size;
6029}
6030
6031static bool inside_selftest(void)
6032{
6033 return current == lockdep_selftest_task_struct;
6034}
6035
6036
6037static struct pending_free *get_pending_free(void)
6038{
6039 return delayed_free.pf + delayed_free.index;
6040}
6041
6042static void free_zapped_rcu(struct rcu_head *cb);
6043
6044
6045
6046
6047
6048static void call_rcu_zapped(struct pending_free *pf)
6049{
6050 WARN_ON_ONCE(inside_selftest());
6051
6052 if (list_empty(&pf->zapped))
6053 return;
6054
6055 if (delayed_free.scheduled)
6056 return;
6057
6058 delayed_free.scheduled = true;
6059
6060 WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
6061 delayed_free.index ^= 1;
6062
6063 call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
6064}
6065
6066
6067static void __free_zapped_classes(struct pending_free *pf)
6068{
6069 struct lock_class *class;
6070
6071 check_data_structures();
6072
6073 list_for_each_entry(class, &pf->zapped, lock_entry)
6074 reinit_class(class);
6075
6076 list_splice_init(&pf->zapped, &free_lock_classes);
6077
6078#ifdef CONFIG_PROVE_LOCKING
6079 bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
6080 pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
6081 bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
6082#endif
6083}
6084
6085static void free_zapped_rcu(struct rcu_head *ch)
6086{
6087 struct pending_free *pf;
6088 unsigned long flags;
6089
6090 if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
6091 return;
6092
6093 raw_local_irq_save(flags);
6094 lockdep_lock();
6095
6096
6097 pf = delayed_free.pf + (delayed_free.index ^ 1);
6098 __free_zapped_classes(pf);
6099 delayed_free.scheduled = false;
6100
6101
6102
6103
6104 call_rcu_zapped(delayed_free.pf + delayed_free.index);
6105
6106 lockdep_unlock();
6107 raw_local_irq_restore(flags);
6108}
6109
6110
6111
6112
6113
6114
6115
6116static void __lockdep_free_key_range(struct pending_free *pf, void *start,
6117 unsigned long size)
6118{
6119 struct lock_class *class;
6120 struct hlist_head *head;
6121 int i;
6122
6123
6124 for (i = 0; i < CLASSHASH_SIZE; i++) {
6125 head = classhash_table + i;
6126 hlist_for_each_entry_rcu(class, head, hash_entry) {
6127 if (!within(class->key, start, size) &&
6128 !within(class->name, start, size))
6129 continue;
6130 zap_class(pf, class);
6131 }
6132 }
6133}
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143static void lockdep_free_key_range_reg(void *start, unsigned long size)
6144{
6145 struct pending_free *pf;
6146 unsigned long flags;
6147
6148 init_data_structures_once();
6149
6150 raw_local_irq_save(flags);
6151 lockdep_lock();
6152 pf = get_pending_free();
6153 __lockdep_free_key_range(pf, start, size);
6154 call_rcu_zapped(pf);
6155 lockdep_unlock();
6156 raw_local_irq_restore(flags);
6157
6158
6159
6160
6161
6162 synchronize_rcu();
6163}
6164
6165
6166
6167
6168
6169static void lockdep_free_key_range_imm(void *start, unsigned long size)
6170{
6171 struct pending_free *pf = delayed_free.pf;
6172 unsigned long flags;
6173
6174 init_data_structures_once();
6175
6176 raw_local_irq_save(flags);
6177 lockdep_lock();
6178 __lockdep_free_key_range(pf, start, size);
6179 __free_zapped_classes(pf);
6180 lockdep_unlock();
6181 raw_local_irq_restore(flags);
6182}
6183
6184void lockdep_free_key_range(void *start, unsigned long size)
6185{
6186 init_data_structures_once();
6187
6188 if (inside_selftest())
6189 lockdep_free_key_range_imm(start, size);
6190 else
6191 lockdep_free_key_range_reg(start, size);
6192}
6193
6194
6195
6196
6197
6198
6199static bool lock_class_cache_is_registered(struct lockdep_map *lock)
6200{
6201 struct lock_class *class;
6202 struct hlist_head *head;
6203 int i, j;
6204
6205 for (i = 0; i < CLASSHASH_SIZE; i++) {
6206 head = classhash_table + i;
6207 hlist_for_each_entry_rcu(class, head, hash_entry) {
6208 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
6209 if (lock->class_cache[j] == class)
6210 return true;
6211 }
6212 }
6213 return false;
6214}
6215
6216
6217static void __lockdep_reset_lock(struct pending_free *pf,
6218 struct lockdep_map *lock)
6219{
6220 struct lock_class *class;
6221 int j;
6222
6223
6224
6225
6226 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
6227
6228
6229
6230 class = look_up_lock_class(lock, j);
6231 if (class)
6232 zap_class(pf, class);
6233 }
6234
6235
6236
6237
6238 if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
6239 debug_locks_off();
6240}
6241
6242
6243
6244
6245
6246static void lockdep_reset_lock_reg(struct lockdep_map *lock)
6247{
6248 struct pending_free *pf;
6249 unsigned long flags;
6250 int locked;
6251
6252 raw_local_irq_save(flags);
6253 locked = graph_lock();
6254 if (!locked)
6255 goto out_irq;
6256
6257 pf = get_pending_free();
6258 __lockdep_reset_lock(pf, lock);
6259 call_rcu_zapped(pf);
6260
6261 graph_unlock();
6262out_irq:
6263 raw_local_irq_restore(flags);
6264}
6265
6266
6267
6268
6269
6270static void lockdep_reset_lock_imm(struct lockdep_map *lock)
6271{
6272 struct pending_free *pf = delayed_free.pf;
6273 unsigned long flags;
6274
6275 raw_local_irq_save(flags);
6276 lockdep_lock();
6277 __lockdep_reset_lock(pf, lock);
6278 __free_zapped_classes(pf);
6279 lockdep_unlock();
6280 raw_local_irq_restore(flags);
6281}
6282
6283void lockdep_reset_lock(struct lockdep_map *lock)
6284{
6285 init_data_structures_once();
6286
6287 if (inside_selftest())
6288 lockdep_reset_lock_imm(lock);
6289 else
6290 lockdep_reset_lock_reg(lock);
6291}
6292
6293
6294void lockdep_unregister_key(struct lock_class_key *key)
6295{
6296 struct hlist_head *hash_head = keyhashentry(key);
6297 struct lock_class_key *k;
6298 struct pending_free *pf;
6299 unsigned long flags;
6300 bool found = false;
6301
6302 might_sleep();
6303
6304 if (WARN_ON_ONCE(static_obj(key)))
6305 return;
6306
6307 raw_local_irq_save(flags);
6308 if (!graph_lock())
6309 goto out_irq;
6310
6311 pf = get_pending_free();
6312 hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
6313 if (k == key) {
6314 hlist_del_rcu(&k->hash_entry);
6315 found = true;
6316 break;
6317 }
6318 }
6319 WARN_ON_ONCE(!found);
6320 __lockdep_free_key_range(pf, key, 1);
6321 call_rcu_zapped(pf);
6322 graph_unlock();
6323out_irq:
6324 raw_local_irq_restore(flags);
6325
6326
6327 synchronize_rcu();
6328}
6329EXPORT_SYMBOL_GPL(lockdep_unregister_key);
6330
6331void __init lockdep_init(void)
6332{
6333 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
6334
6335 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
6336 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
6337 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
6338 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
6339 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
6340 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
6341 printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
6342
6343 printk(" memory used by lock dependency info: %zu kB\n",
6344 (sizeof(lock_classes) +
6345 sizeof(lock_classes_in_use) +
6346 sizeof(classhash_table) +
6347 sizeof(list_entries) +
6348 sizeof(list_entries_in_use) +
6349 sizeof(chainhash_table) +
6350 sizeof(delayed_free)
6351#ifdef CONFIG_PROVE_LOCKING
6352 + sizeof(lock_cq)
6353 + sizeof(lock_chains)
6354 + sizeof(lock_chains_in_use)
6355 + sizeof(chain_hlocks)
6356#endif
6357 ) / 1024
6358 );
6359
6360#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
6361 printk(" memory used for stack traces: %zu kB\n",
6362 (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024
6363 );
6364#endif
6365
6366 printk(" per task-struct memory footprint: %zu bytes\n",
6367 sizeof(((struct task_struct *)NULL)->held_locks));
6368}
6369
6370static void
6371print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
6372 const void *mem_to, struct held_lock *hlock)
6373{
6374 if (!debug_locks_off())
6375 return;
6376 if (debug_locks_silent)
6377 return;
6378
6379 pr_warn("\n");
6380 pr_warn("=========================\n");
6381 pr_warn("WARNING: held lock freed!\n");
6382 print_kernel_ident();
6383 pr_warn("-------------------------\n");
6384 pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
6385 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
6386 print_lock(hlock);
6387 lockdep_print_held_locks(curr);
6388
6389 pr_warn("\nstack backtrace:\n");
6390 dump_stack();
6391}
6392
6393static inline int not_in_range(const void* mem_from, unsigned long mem_len,
6394 const void* lock_from, unsigned long lock_len)
6395{
6396 return lock_from + lock_len <= mem_from ||
6397 mem_from + mem_len <= lock_from;
6398}
6399
6400
6401
6402
6403
6404
6405void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
6406{
6407 struct task_struct *curr = current;
6408 struct held_lock *hlock;
6409 unsigned long flags;
6410 int i;
6411
6412 if (unlikely(!debug_locks))
6413 return;
6414
6415 raw_local_irq_save(flags);
6416 for (i = 0; i < curr->lockdep_depth; i++) {
6417 hlock = curr->held_locks + i;
6418
6419 if (not_in_range(mem_from, mem_len, hlock->instance,
6420 sizeof(*hlock->instance)))
6421 continue;
6422
6423 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
6424 break;
6425 }
6426 raw_local_irq_restore(flags);
6427}
6428EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
6429
6430static void print_held_locks_bug(void)
6431{
6432 if (!debug_locks_off())
6433 return;
6434 if (debug_locks_silent)
6435 return;
6436
6437 pr_warn("\n");
6438 pr_warn("====================================\n");
6439 pr_warn("WARNING: %s/%d still has locks held!\n",
6440 current->comm, task_pid_nr(current));
6441 print_kernel_ident();
6442 pr_warn("------------------------------------\n");
6443 lockdep_print_held_locks(current);
6444 pr_warn("\nstack backtrace:\n");
6445 dump_stack();
6446}
6447
6448void debug_check_no_locks_held(void)
6449{
6450 if (unlikely(current->lockdep_depth > 0))
6451 print_held_locks_bug();
6452}
6453EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
6454
6455#ifdef __KERNEL__
6456void debug_show_all_locks(void)
6457{
6458 struct task_struct *g, *p;
6459
6460 if (unlikely(!debug_locks)) {
6461 pr_warn("INFO: lockdep is turned off.\n");
6462 return;
6463 }
6464 pr_warn("\nShowing all locks held in the system:\n");
6465
6466 rcu_read_lock();
6467 for_each_process_thread(g, p) {
6468 if (!p->lockdep_depth)
6469 continue;
6470 lockdep_print_held_locks(p);
6471 touch_nmi_watchdog();
6472 touch_all_softlockup_watchdogs();
6473 }
6474 rcu_read_unlock();
6475
6476 pr_warn("\n");
6477 pr_warn("=============================================\n\n");
6478}
6479EXPORT_SYMBOL_GPL(debug_show_all_locks);
6480#endif
6481
6482
6483
6484
6485
6486void debug_show_held_locks(struct task_struct *task)
6487{
6488 if (unlikely(!debug_locks)) {
6489 printk("INFO: lockdep is turned off.\n");
6490 return;
6491 }
6492 lockdep_print_held_locks(task);
6493}
6494EXPORT_SYMBOL_GPL(debug_show_held_locks);
6495
6496asmlinkage __visible void lockdep_sys_exit(void)
6497{
6498 struct task_struct *curr = current;
6499
6500 if (unlikely(curr->lockdep_depth)) {
6501 if (!debug_locks_off())
6502 return;
6503 pr_warn("\n");
6504 pr_warn("================================================\n");
6505 pr_warn("WARNING: lock held when returning to user space!\n");
6506 print_kernel_ident();
6507 pr_warn("------------------------------------------------\n");
6508 pr_warn("%s/%d is leaving the kernel with locks still held!\n",
6509 curr->comm, curr->pid);
6510 lockdep_print_held_locks(curr);
6511 }
6512
6513
6514
6515
6516
6517 lockdep_invariant_state(false);
6518}
6519
6520void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
6521{
6522 struct task_struct *curr = current;
6523 int dl = READ_ONCE(debug_locks);
6524
6525
6526 pr_warn("\n");
6527 pr_warn("=============================\n");
6528 pr_warn("WARNING: suspicious RCU usage\n");
6529 print_kernel_ident();
6530 pr_warn("-----------------------------\n");
6531 pr_warn("%s:%d %s!\n", file, line, s);
6532 pr_warn("\nother info that might help us debug this:\n\n");
6533 pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n%s",
6534 !rcu_lockdep_current_cpu_online()
6535 ? "RCU used illegally from offline CPU!\n"
6536 : "",
6537 rcu_scheduler_active, dl,
6538 dl ? "" : "Possible false positive due to lockdep disabling via debug_locks = 0\n");
6539
6540
6541
6542
6543
6544
6545
6546
6547
6548
6549
6550
6551
6552
6553
6554
6555
6556
6557
6558 if (!rcu_is_watching())
6559 pr_warn("RCU used illegally from extended quiescent state!\n");
6560
6561 lockdep_print_held_locks(curr);
6562 pr_warn("\nstack backtrace:\n");
6563 dump_stack();
6564}
6565EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
6566