linux/include/linux/lockdep.h
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES           (1+3*4)

#define MAX_LOCKDEP_SUBCLASSES          8UL

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires it at single depth, and that lock is highly
 * contended.
 */
#define NR_LOCKDEP_CACHING_CLASSES      2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
        char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
        struct lockdep_subclass_key     subkeys[MAX_LOCKDEP_SUBCLASSES];
};

extern struct lock_class_key __lockdep_no_validate__;

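/*
 * Example (illustrative sketch, not part of this header; the key and
 * device names are hypothetical): a driver that wants all instances of
 * one of its locks to share a class can embed a static key and install
 * it at init time:
 *
 *        static struct lock_class_key my_driver_lock_key;
 *
 *        spin_lock_init(&my_dev->lock);
 *        lockdep_set_class(&my_dev->lock, &my_driver_lock_key);
 */
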
#define LOCKSTAT_POINTS         4

/*
 * The lock-class itself:
 */
struct lock_class {
        /*
         * class-hash:
         */
        struct hlist_node               hash_entry;

        /*
         * global list of all lock-classes:
         */
        struct list_head                lock_entry;

        struct lockdep_subclass_key     *key;
        unsigned int                    subclass;
        unsigned int                    dep_gen_id;

        /*
         * IRQ/softirq usage tracking bits:
         */
        unsigned long                   usage_mask;
        struct stack_trace              usage_traces[XXX_LOCK_USAGE_STATES];

        /*
         * These fields represent a directed graph of lock dependencies;
         * to every node we attach a list of "forward" and a list of
         * "backward" graph nodes.
         */
        struct list_head                locks_after, locks_before;

        /*
         * Generation counter, when doing certain classes of graph walking,
         * to ensure that we check one node only once:
         */
        unsigned int                    version;

        /*
         * Statistics counter:
         */
        unsigned long                   ops;

        const char                      *name;
        int                             name_version;

#ifdef CONFIG_LOCK_STAT
        unsigned long                   contention_point[LOCKSTAT_POINTS];
        unsigned long                   contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
        s64                             min;
        s64                             max;
        s64                             total;
        unsigned long                   nr;
};

enum bounce_type {
        bounce_acquired_write,
        bounce_acquired_read,
        bounce_contended_write,
        bounce_contended_read,
        nr_bounce_types,

        bounce_acquired = bounce_acquired_write,
        bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
        unsigned long                   contention_point[LOCKSTAT_POINTS];
        unsigned long                   contending_point[LOCKSTAT_POINTS];
        struct lock_time                read_waittime;
        struct lock_time                write_waittime;
        struct lock_time                read_holdtime;
        struct lock_time                write_holdtime;
        unsigned long                   bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
        struct lock_class_key           *key;
        struct lock_class               *class_cache[NR_LOCKDEP_CACHING_CLASSES];
        const char                      *name;
#ifdef CONFIG_LOCK_STAT
        int                             cpu;
        unsigned long                   ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
                                    struct lockdep_map *from)
{
        int i;

        *to = *from;
        /*
         * Since the class cache can be modified concurrently we could observe
         * half pointers (64bit arch using 32bit copy insns). Therefore clear
         * the caches and take the performance hit.
         *
         * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
         *     that relies on cache abuse.
         */
        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
                to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
        struct list_head                entry;
        struct lock_class               *class;
        struct stack_trace              trace;
        int                             distance;

        /*
         * The parent field is used to implement breadth-first search, and
         * bit 0 is reused to indicate whether the lock has been visited
         * during BFS.
         */
        struct lock_list                *parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
        /* see BUILD_BUG_ON()s in lookup_chain_cache() */
        unsigned int                    irq_context :  2,
                                        depth       :  6,
                                        base        : 24;
        /* 4 byte hole */
        struct hlist_node               entry;
        u64                             chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS           13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS                ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
        /*
         * One-way hash of the dependency chain up to this point. We
         * hash the hashes step by step as the dependency chain grows.
         *
         * We use it for dependency-caching and we skip detection
         * passes and dependency-updates if there is a cache-hit, so
         * it is absolutely critical for 100% coverage of the validator
         * to have a unique key value for every unique dependency path
         * that can occur in the system, to make a unique hash value
         * as likely as possible - hence the 64-bit width.
         *
         * The task struct holds the current hash value (initialized
         * to zero); here we store the previous hash value:
         */
        u64                             prev_chain_key;
        unsigned long                   acquire_ip;
        struct lockdep_map              *instance;
        struct lockdep_map              *nest_lock;
#ifdef CONFIG_LOCK_STAT
        u64                             waittime_stamp;
        u64                             holdtime_stamp;
#endif
        unsigned int                    class_idx:MAX_LOCKDEP_KEYS_BITS;
        /*
         * The lock-stack is unified in that the lock chains of interrupt
         * contexts nest on top of process context chains, but we 'separate'
         * the hashes by starting with 0 if we cross into an interrupt
         * context, and we also do not add cross-context lock
         * dependencies - the lock usage graph walking covers that area
         * anyway, and we'd just unnecessarily increase the number of
         * dependencies otherwise. [Note: hardirq and softirq contexts
         * are separated from each other too.]
         *
         * The following field is used to detect when we cross into an
         * interrupt context:
         */
        unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
        unsigned int trylock:1;                                         /* 16 bits */

        unsigned int read:2;        /* see lock_acquire() comment */
        unsigned int check:1;       /* see lock_acquire() comment */
        unsigned int hardirqs_off:1;
        unsigned int references:12;                                     /* 32 bits */
        unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
                             struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically, use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }

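/*
 * Example (illustrative sketch; the names are hypothetical): pairing a
 * static map with a static key, the way e.g. the workqueue code builds
 * its lockdep annotations:
 *
 *        static struct lock_class_key my_map_key;
 *        static struct lockdep_map my_map =
 *                STATIC_LOCKDEP_MAP_INIT("my_map", &my_map_key);
 */
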
/*
 * Reinitialize a lock key - for cases where special locking or special
 * initialization of locks makes the validator get the scope of
 * dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
                lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
                lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
                lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
                lockdep_init_map(&(lock)->dep_map, #lock, \
                                 (lock)->dep_map.key, sub)

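/*
 * Example: splitting one class in two to kill a false positive, the way
 * several filesystems key directory inodes differently from regular-file
 * inodes (the key name here is hypothetical):
 *
 *        static struct lock_class_key dir_inode_key;
 *
 *        if (S_ISDIR(inode->i_mode))
 *                lockdep_set_class(&inode->i_mutex, &dir_inode_key);
 */
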
#define lockdep_set_novalidate_class(lock) \
        lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
                                    struct lock_class_key *key)
{
        return lock->key == key;
}

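/*
 * Example (sketch; names as in the class-key example above): sanity-check
 * that a lock was keyed as expected before relying on its class:
 *
 *        WARN_ON(!lockdep_match_class(&my_dev->lock, &my_driver_lock_key));
 */
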
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                         int trylock, int read, int check,
                         struct lockdep_map *nest_lock, unsigned long ip);

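/*
 * Example: how a recursive-read acquisition maps onto the "read" argument
 * above; this is exactly what the rwlock_acquire_read() wrapper defined
 * later in this header expands to:
 *
 *        lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, _RET_IP_);
 *        ... critical section ...
 *        lock_release(&lock->dep_map, 0, _RET_IP_);
 */
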
extern void lock_release(struct lockdep_map *lock, int nested,
                         unsigned long ip);

#define lockdep_is_held(lock)   lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
                           struct lock_class_key *key, unsigned int subclass,
                           unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
                unsigned int subclass, unsigned long ip)
{
        lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

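/*
 * Example (sketch, in the spirit of the scheduler's double-runqueue
 * locking): after releasing the nested lock of a same-class pair, the
 * still-held lock can be re-annotated back to subclass 0:
 *
 *        raw_spin_unlock(&busiest->lock);
 *        lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 */
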
extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

extern void lock_pin_lock(struct lockdep_map *lock);
extern void lock_unpin_lock(struct lockdep_map *lock);

# define INIT_LOCKDEP                           .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)      (debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)  do {                            \
                WARN_ON(debug_locks && !lockdep_is_held(l));    \
        } while (0)

#define lockdep_assert_held_once(l)     do {                            \
                WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));       \
        } while (0)

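/*
 * Example: documenting and enforcing a locking precondition on entry to a
 * helper; this is the typical use of lockdep_assert_held() ("my_update"
 * and "dev" are hypothetical):
 *
 *        static void my_update(struct my_dev *dev)
 *        {
 *                lockdep_assert_held(&dev->lock);
 *                ... modify state protected by dev->lock ...
 *        }
 */
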
#define lockdep_recursing(tsk)  ((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)             lock_pin_lock(&(l)->dep_map)
#define lockdep_unpin_lock(l)           lock_unpin_lock(&(l)->dep_map)

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)      do { } while (0)
# define lock_release(l, n, i)                  do { } while (0)
# define lock_set_class(l, n, k, s, i)          do { } while (0)
# define lock_set_subclass(l, s, i)             do { } while (0)
# define lockdep_set_current_reclaim_state(g)   do { } while (0)
# define lockdep_clear_current_reclaim_state()  do { } while (0)
# define lockdep_trace_alloc(g)                 do { } while (0)
# define lockdep_info()                         do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)           do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
                do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
                do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)        do { } while (0)

# define lockdep_set_novalidate_class(lock)     do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller
 * should rather #ifdef the call itself.
 */

# define INIT_LOCKDEP
# define lockdep_reset()                do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)    do { } while (0)
# define lockdep_sys_exit()                     do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)      (0)

#define lockdep_assert_held(l)                  do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)             do { (void)(l); } while (0)

#define lockdep_recursing(tsk)                  (0)

#define lockdep_pin_lock(l)                     do { (void)(l); } while (0)
#define lockdep_unpin_lock(l)                   do { (void)(l); } while (0)

#endif /* !CONFIG_LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)                        \
do {                                                            \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                lock(_lock);                                    \
        }                                                       \
        lock_acquired(&(_lock)->dep_map, _RET_IP_);             \
} while (0)

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)

#endif /* CONFIG_LOCK_STAT */
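
/*
 * Example: how a sleeping-lock implementation feeds these statistics;
 * down_read() does essentially (slightly abridged):
 *
 *        rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 *        LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 *
 * i.e. a contention event is recorded only when the trylock fast path
 * fails, and the acquired timestamp is taken once the lock is held.
 */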

#ifdef CONFIG_LOCKDEP

/*
 * With lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING                    1

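/*
 * Example (sketch; "parent" and "child" are hypothetical): taking two
 * locks of the same class without triggering a false self-deadlock
 * report:
 *
 *        mutex_lock(&parent->lock);
 *        mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */
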
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)           lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)              lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)    lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)                lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)        lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)                   lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)              lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)         lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)                 lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)            lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)       lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)               lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)                  lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)          lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)                  lock_release(l, n, i)

#define lock_map_acquire(l)                     lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)                lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)             lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)                     lock_release(l, 1, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)                                               \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
} while (0)
# define might_lock_read(lock)                                          \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif

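/*
 * Example (sketch; "my_flush", "my_dev" and "dirty" are hypothetical):
 * might_lock() lets a function declare "I may take this lock" even on
 * paths that don't, so ordering bugs surface on every run, not just on
 * the slow path:
 *
 *        static void my_flush(struct my_dev *dev)
 *        {
 *                might_lock(&dev->lock);
 *                if (!dev->dirty)
 *                        return;
 *                mutex_lock(&dev->lock);
 *                ... write back dev state ...
 *                mutex_unlock(&dev->lock);
 *        }
 */
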
#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */