/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
10#ifndef __LINUX_LOCKDEP_H
11#define __LINUX_LOCKDEP_H
12
13#include <linux/rh_kabi.h>
14#include <linux/lockdep_types.h>
15#include RH_KABI_HIDE_INCLUDE(<linux/smp.h>)
16#include <asm/percpu.h>
17
18struct task_struct;
19
20
21extern int prove_locking;
22extern int lock_stat;
23
24#ifdef CONFIG_LOCKDEP
25
26#include <linux/linkage.h>
27#include <linux/list.h>
28#include <linux/debug_locks.h>
29#include <linux/stacktrace.h>
30
31static inline void lockdep_copy_map(struct lockdep_map *to,
32 struct lockdep_map *from)
33{
34 int i;
35
36 *to = *from;
37
38
39
40
41
42
43
44
45 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
46 to->class_cache[i] = NULL;
47}
48
49
50
51
52
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};
70
71
72
73
74
75
76
77
78
79
/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};
89
90#define MAX_LOCKDEP_KEYS_BITS 13
91#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
92#define INITIAL_CHAIN_KEY -1
93
struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest ontop of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also keep do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};
145
146
147
148
149extern void lockdep_init(void);
150extern void lockdep_reset(void);
151extern void lockdep_reset_lock(struct lockdep_map *lock);
152extern void lockdep_free_key_range(void *start, unsigned long size);
153extern asmlinkage void lockdep_sys_exit(void);
154extern void lockdep_set_selftest_task(struct task_struct *task);
155
156extern void lockdep_init_task(struct task_struct *task);
157
158
159
160
161#define LOCKDEP_RECURSION_BITS 16
162#define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS)
163#define LOCKDEP_RECURSION_MASK (LOCKDEP_OFF - 1)
164
165
166
167
168
169
/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.  They bump the recursion counter by LOCKDEP_OFF so
 * "off" is distinguishable from plain recursion (see LOCKDEP_RECURSION_MASK).
 */
#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)
179
180extern void lockdep_register_key(struct lock_class_key *key);
181extern void lockdep_unregister_key(struct lock_class_key *key);
182
183
184
185
186
187
188
189extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
190 struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);
191
192static inline void
193lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
194 struct lock_class_key *key, int subclass, u8 inner, u8 outer)
195{
196 lockdep_init_map_type(lock, name, key, subclass, inner, LD_WAIT_INV, LD_LOCK_NORMAL);
197}
198
/* Initialize @lock with only an inner wait type; outer is left invalid. */
static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

/* Initialize @lock with both wait types invalid (no wait-type checking). */
static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}
211
212
213
214
215
216
217
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			       (lock)->dep_map.wait_type_inner,		\
			       (lock)->dep_map.wait_type_outer)

/* Opt a lock out of dependency validation entirely (use sparingly). */
#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
240
241
242
243
244#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
245
246static inline int lockdep_match_key(struct lockdep_map *lock,
247 struct lock_class_key *key)
248{
249 return lock->key == key;
250}
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
267 int trylock, int read, int check,
268 struct lockdep_map *nest_lock, unsigned long ip);
269
270extern void lock_release(struct lockdep_map *lock, unsigned long ip);
271
272
273#define LOCK_STATE_UNKNOWN -1
274#define LOCK_STATE_NOT_HELD 0
275#define LOCK_STATE_HELD 1
276
277
278
279
280extern int lock_is_held_type(const struct lockdep_map *lock, int read);
281
/* Non-zero if @lock is held in any mode; -1 == don't care about read/write. */
static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}
286
287#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
288#define lockdep_is_held_type(lock, r) lock_is_held_type(&(lock)->dep_map, (r))
289
290extern void lock_set_class(struct lockdep_map *lock, const char *name,
291 struct lock_class_key *key, unsigned int subclass,
292 unsigned long ip);
293
/* Re-class the held @lock to @subclass, keeping its current name and key. */
static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
299
300extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
301
302#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
303
304extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
305extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
306extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
307
308#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
309
/*
 * lockdep_is_held() returns LOCK_STATE_UNKNOWN (-1) when it cannot tell,
 * so the assertions below only fire on a definite NOT_HELD / HELD answer.
 */
#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks &&				\
			lockdep_is_held(l) == LOCK_STATE_NOT_HELD); \
	} while (0)

#define lockdep_assert_not_held(l)	do {			\
		WARN_ON(debug_locks &&				\
			lockdep_is_held(l) == LOCK_STATE_HELD); \
	} while (0)

/* read == 0: assert held for write */
#define lockdep_assert_held_write(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

/* read == 1: assert held for read */
#define lockdep_assert_held_read(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

/* Pin/unpin a held lock so unexpected releases are caught (see pin_cookie). */
#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
337
338#else
339
340static inline void lockdep_init_task(struct task_struct *task)
341{
342}
343
344static inline void lockdep_off(void)
345{
346}
347
348static inline void lockdep_on(void)
349{
350}
351
352static inline void lockdep_set_selftest_task(struct task_struct *task)
353{
354}
355
356# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
357# define lock_release(l, i) do { } while (0)
358# define lock_downgrade(l, i) do { } while (0)
359# define lock_set_class(l, n, k, s, i) do { } while (0)
360# define lock_set_subclass(l, s, i) do { } while (0)
361# define lockdep_init() do { } while (0)
362# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
363 do { (void)(name); (void)(key); } while (0)
364# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
365 do { (void)(name); (void)(key); } while (0)
366# define lockdep_init_map_wait(lock, name, key, sub, inner) \
367 do { (void)(name); (void)(key); } while (0)
368# define lockdep_init_map(lock, name, key, sub) \
369 do { (void)(name); (void)(key); } while (0)
370# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
371# define lockdep_set_class_and_name(lock, key, name) \
372 do { (void)(key); (void)(name); } while (0)
373#define lockdep_set_class_and_subclass(lock, key, sub) \
374 do { (void)(key); } while (0)
375#define lockdep_set_subclass(lock, sub) do { } while (0)
376
377#define lockdep_set_novalidate_class(lock) do { } while (0)
378
379
380
381
382
383
384
385# define lockdep_reset() do { debug_locks = 1; } while (0)
386# define lockdep_free_key_range(start, size) do { } while (0)
387# define lockdep_sys_exit() do { } while (0)
388
389static inline void lockdep_register_key(struct lock_class_key *key)
390{
391}
392
393static inline void lockdep_unregister_key(struct lock_class_key *key)
394{
395}
396
397#define lockdep_depth(tsk) (0)
398
399
400
401
402
403extern int lock_is_held(const void *);
404extern int lockdep_is_held(const void *);
405#define lockdep_is_held_type(l, r) (1)
406
407#define lockdep_assert_held(l) do { (void)(l); } while (0)
408#define lockdep_assert_not_held(l) do { (void)(l); } while (0)
409#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
410#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
411#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
412
413#define lockdep_recursing(tsk) (0)
414
415#define NIL_COOKIE (struct pin_cookie){ }
416
417#define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; })
418#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
419#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)
420
421#endif
422
423#define lockdep_assert_held_exclusive lockdep_assert_held_write
424
425enum xhlock_context_t {
426 XHLOCK_HARD,
427 XHLOCK_SOFT,
428 XHLOCK_CTX_NR,
429};
430
431#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
432
433
434
435
436#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
437 { .name = (_name), .key = (void *)(_key), }
438
439static inline void lockdep_invariant_state(bool force) {}
440static inline void lockdep_free_task(struct task_struct *task) {}
441
442#ifdef CONFIG_LOCK_STAT
443
444extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
445extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
446
/*
 * Contention accounting wrapper: attempt the trylock first; only if that
 * fails do we record contention before taking the slow path, and we always
 * record the acquisition afterwards.  @try and @lock are evaluated as
 * function-like callables on @_lock (multiple evaluation of @_lock is fine,
 * it is expected to be a plain lock pointer/lvalue).
 */
#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

/*
 * As above, but for interruptible/killable slow paths that return an error
 * code; acquisition is only recorded when the slow path succeeded.
 */
#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
467
468#else
469
470#define lock_contended(lockdep_map, ip) do {} while (0)
471#define lock_acquired(lockdep_map, ip) do {} while (0)
472
473#define LOCK_CONTENDED(_lock, try, lock) \
474 lock(_lock)
475
476#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
477 lock(_lock)
478
479#endif
480
481#ifdef CONFIG_LOCKDEP
482
483
484
485
486
487
488#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
489 LOCK_CONTENDED((_lock), (try), (lock))
490
491#else
492
493#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
494 lockfl((_lock), (flags))
495
496#endif
497
498#ifdef CONFIG_PROVE_LOCKING
499extern void print_irqtrace_events(struct task_struct *curr);
500#else
501static inline void print_irqtrace_events(struct task_struct *curr)
502{
503}
504#endif
505
506
507#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
508extern unsigned int force_read_lock_recursive;
509#else
510#define force_read_lock_recursive 0
511#endif
512
513#ifdef CONFIG_LOCKDEP
514extern bool read_lock_is_recursive(void);
515#else
516
517#define read_lock_is_recursive() 0
518#endif
519
520
521
522
523
524
525#define SINGLE_DEPTH_NESTING 1
526
527
528
529
530
531
532#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
533#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
534#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
535
536#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
537#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
538#define spin_release(l, i) lock_release(l, i)
539
540#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
541#define rwlock_acquire_read(l, s, t, i) \
542do { \
543 if (read_lock_is_recursive()) \
544 lock_acquire_shared_recursive(l, s, t, NULL, i); \
545 else \
546 lock_acquire_shared(l, s, t, NULL, i); \
547} while (0)
548
549#define rwlock_release(l, i) lock_release(l, i)
550
551#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
552#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
553#define seqcount_release(l, i) lock_release(l, i)
554
555#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
556#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
557#define mutex_release(l, i) lock_release(l, i)
558
559#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
560#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
561#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
562#define rwsem_release(l, i) lock_release(l, i)
563
564#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
565#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
566#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
567#define lock_map_release(l) lock_release(l, _THIS_IP_)
568
569#ifdef CONFIG_PROVE_LOCKING
570# define might_lock(lock) \
571do { \
572 typecheck(struct lockdep_map *, &(lock)->dep_map); \
573 lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
574 lock_release(&(lock)->dep_map, _THIS_IP_); \
575} while (0)
576# define might_lock_read(lock) \
577do { \
578 typecheck(struct lockdep_map *, &(lock)->dep_map); \
579 lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
580 lock_release(&(lock)->dep_map, _THIS_IP_); \
581} while (0)
582# define might_lock_nested(lock, subclass) \
583do { \
584 typecheck(struct lockdep_map *, &(lock)->dep_map); \
585 lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \
586 _THIS_IP_); \
587 lock_release(&(lock)->dep_map, _THIS_IP_); \
588} while (0)
589
590DECLARE_PER_CPU(int, hardirqs_enabled);
591DECLARE_PER_CPU(int, hardirq_context);
592DECLARE_PER_CPU(unsigned int, lockdep_recursion);
593
594#define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion))
595
596#define lockdep_assert_irqs_enabled() \
597do { \
598 WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
599} while (0)
600
601#define lockdep_assert_irqs_disabled() \
602do { \
603 WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
604} while (0)
605
606#define lockdep_assert_in_irq() \
607do { \
608 WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
609} while (0)
610
611#define lockdep_assert_preemption_enabled() \
612do { \
613 WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
614 __lockdep_enabled && \
615 (preempt_count() != 0 || \
616 !this_cpu_read(hardirqs_enabled))); \
617} while (0)
618
619#define lockdep_assert_preemption_disabled() \
620do { \
621 WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
622 __lockdep_enabled && \
623 (preempt_count() == 0 && \
624 this_cpu_read(hardirqs_enabled))); \
625} while (0)
626
627
628
629
630
631#define lockdep_assert_in_softirq() \
632do { \
633 WARN_ON_ONCE(__lockdep_enabled && \
634 (!in_softirq() || in_irq() || in_nmi())); \
635} while (0)
636
637#else
638# define might_lock(lock) do { } while (0)
639# define might_lock_read(lock) do { } while (0)
640# define might_lock_nested(lock, subclass) do { } while (0)
641
642# define lockdep_assert_irqs_enabled() do { } while (0)
643# define lockdep_assert_irqs_disabled() do { } while (0)
644# define lockdep_assert_in_irq() do { } while (0)
645
646# define lockdep_assert_preemption_enabled() do { } while (0)
647# define lockdep_assert_preemption_disabled() do { } while (0)
648# define lockdep_assert_in_softirq() do { } while (0)
649#endif
650
651#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
652
653# define lockdep_assert_RT_in_threaded_ctx() do { \
654 WARN_ONCE(debug_locks && !current->lockdep_recursion && \
655 lockdep_hardirq_context() && \
656 !(current->hardirq_threaded || current->irq_config), \
657 "Not in threaded context on PREEMPT_RT as expected\n"); \
658} while (0)
659
660#else
661
662# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)
663
664#endif
665
666#ifdef CONFIG_LOCKDEP
667void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
668#else
669static inline void
670lockdep_rcu_suspicious(const char *file, const int line, const char *s)
671{
672}
673#endif
674
675#endif
676