linux/kernel/locking/lockdep_internals.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/locking/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)          \
        LOCK_USED_IN_##__STATE,         \
        LOCK_USED_IN_##__STATE##_READ,  \
        LOCK_ENABLED_##__STATE,         \
        LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        LOCK_USED,
        LOCK_USAGE_STATES
};
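
/*
 * Example expansion (a sketch, assuming lockdep_states.h lists the two IRQ
 * states HARDIRQ and SOFTIRQ that the LOCKF_*_IRQ masks below rely on):
 *
 *      enum lock_usage_bit {
 *              LOCK_USED_IN_HARDIRQ            = 0,
 *              LOCK_USED_IN_HARDIRQ_READ       = 1,
 *              LOCK_ENABLED_HARDIRQ            = 2,
 *              LOCK_ENABLED_HARDIRQ_READ       = 3,
 *              LOCK_USED_IN_SOFTIRQ            = 4,
 *              LOCK_USED_IN_SOFTIRQ_READ       = 5,
 *              LOCK_ENABLED_SOFTIRQ            = 6,
 *              LOCK_ENABLED_SOFTIRQ_READ       = 7,
 *              LOCK_USED                       = 8,
 *              LOCK_USAGE_STATES               = 9
 *      };
 */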

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)        LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)                                          \
        __LOCKF(USED_IN_##__STATE)                                      \
        __LOCKF(USED_IN_##__STATE##_READ)                               \
        __LOCKF(ENABLED_##__STATE)                                      \
        __LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        __LOCKF(USED)
};
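
/*
 * Example expansion (a sketch, under the same HARDIRQ/SOFTIRQ assumption):
 * each __LOCKF() entry turns a usage bit number into a single-bit mask, e.g.
 *
 *      LOCKF_USED_IN_HARDIRQ = (1 << LOCK_USED_IN_HARDIRQ),    i.e. 0x001
 *      LOCKF_ENABLED_SOFTIRQ = (1 << LOCK_ENABLED_SOFTIRQ),    i.e. 0x040
 *      LOCKF_USED            = (1 << LOCK_USED),               i.e. 0x100
 */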

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
                (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
                (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
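
/*
 * Usage sketch (illustrative only, not a verbatim lockdep.c excerpt): the
 * combined masks allow testing a class's accumulated usage bits in one go,
 * along the lines of:
 *
 *      if (class->usage_mask & LOCKF_USED_IN_IRQ)
 *              ...     class was acquired in hardirq or softirq context
 */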

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within the required 32MB limit for the kernel. With
 * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems.
 * So, reduce the static allocations for lockdep-related structures so that
 * everything fits within the required size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by adding
 * each to-be-taken lock to every currently held lock's own dependency
 * table (if it's not there yet), and we check them for lock order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES     16384UL
#define MAX_LOCKDEP_CHAINS_BITS 15
#define MAX_STACK_TRACE_ENTRIES 262144UL
#else
#define MAX_LOCKDEP_ENTRIES     32768UL

#define MAX_LOCKDEP_CHAINS_BITS 16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES 524288UL
#endif

#define MAX_LOCKDEP_CHAINS      (1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
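
/*
 * Worked sizes (just the arithmetic above, spelled out):
 *
 *      CONFIG_LOCKDEP_SMALL:   MAX_LOCKDEP_CHAINS = 1UL << 15 = 32768,
 *                              MAX_LOCKDEP_CHAIN_HLOCKS = 32768 * 5 = 163840
 *      otherwise:              MAX_LOCKDEP_CHAINS = 1UL << 16 = 65536,
 *                              MAX_LOCKDEP_CHAIN_HLOCKS = 65536 * 5 = 327680
 */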

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
                            char usage[LOCK_USAGE_CHARS]);
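
/*
 * Worked size (a sketch, again assuming the HARDIRQ and SOFTIRQ states):
 * LOCK_USAGE_STATES is 9, so LOCK_USAGE_CHARS = 1 + 9/2 = 5, leaving room
 * for two usage characters per IRQ state plus a terminating NUL written by
 * get_usage_chars().
 */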

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
        return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
        return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per CPU as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
        int     chain_lookup_hits;
        int     chain_lookup_misses;
        int     hardirqs_on_events;
        int     hardirqs_off_events;
        int     redundant_hardirqs_on;
        int     redundant_hardirqs_off;
        int     softirqs_on_events;
        int     softirqs_off_events;
        int     redundant_softirqs_on;
        int     redundant_softirqs_off;
        int     nr_unused_locks;
        int     nr_redundant_checks;
        int     nr_redundant;
        int     nr_cyclic_checks;
        int     nr_cyclic_check_recursions;
        int     nr_find_usage_forwards_checks;
        int     nr_find_usage_forwards_recursions;
        int     nr_find_usage_backwards_checks;
        int     nr_find_usage_backwards_recursions;

        /*
         * Per lock class locking operation stat counts
         */
        unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)                                 \
        this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)                   {               \
        WARN_ON_ONCE(!irqs_disabled());                         \
        __this_cpu_inc(lockdep_stats.ptr);                      \
}

#define debug_atomic_dec(ptr)                   {               \
        WARN_ON_ONCE(!irqs_disabled());                         \
        __this_cpu_dec(lockdep_stats.ptr);                      \
}

#define debug_atomic_read(ptr)          ({                              \
        struct lockdep_stats *__cpu_lockdep_stats;                      \
        unsigned long long __total = 0;                                 \
        int __cpu;                                                      \
        for_each_possible_cpu(__cpu) {                                  \
                __cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);   \
                __total += __cpu_lockdep_stats->ptr;                    \
        }                                                               \
        __total;                                                        \
})
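
/*
 * Usage sketch (illustrative, not a verbatim lockdep_proc.c excerpt):
 * writers bump a per-cpu counter with interrupts disabled, readers sum the
 * counter over all possible CPUs:
 *
 *      debug_atomic_inc(chain_lookup_hits);
 *      ...
 *      unsigned long long hits = debug_atomic_read(chain_lookup_hits);
 */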

static inline void debug_class_ops_inc(struct lock_class *class)
{
        int idx;

        idx = class - lock_classes;
        __debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
        int idx, cpu;
        unsigned long ops = 0;

        idx = class - lock_classes;
        for_each_possible_cpu(cpu)
                ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
        return ops;
}

#else
# define __debug_atomic_inc(ptr)        do { } while (0)
# define debug_atomic_inc(ptr)          do { } while (0)
# define debug_atomic_dec(ptr)          do { } while (0)
# define debug_atomic_read(ptr)         0
# define debug_class_ops_inc(ptr)       do { } while (0)
#endif