linux/include/linux/lockdep.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/locking/lockdep_states.h this wide, but
 * we do need the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and a single depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires it with a single depth subclass very
 * frequently.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
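
/*
 * Illustrative sketch (not from the original header): a subsystem can
 * give a group of locks a class of their own by declaring a static key,
 * whose address becomes the class identity, and re-keying each lock
 * after init. The names below are hypothetical:
 *
 *	static struct lock_class_key my_dev_lock_key;
 *
 *	spin_lock_init(&dev->lock);
 *	lockdep_set_class(&dev->lock, &my_dev_lock_key);
 */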

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
	/*
	 * Whether it's a crosslock.
	 */
	int				cross;
#endif
};
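
/*
 * Illustrative sketch (not from the original header): lock
 * implementations embed a lockdep_map, conventionally named "dep_map",
 * which is what the lockdep_* helper macros below operate on. The type
 * below is hypothetical:
 *
 *	struct my_lock {
 *		arch_spinlock_t		raw_lock;
 *	#ifdef CONFIG_LOCKDEP
 *		struct lockdep_map	dep_map;
 *	#endif
 *	};
 */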

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
	/*
	 * Generation id.
	 *
	 * The value of cross_gen_id at acquisition time is stored here;
	 * cross_gen_id is increased globally whenever a crosslock is
	 * acquired.
	 */
	unsigned int gen_id;
#endif
};

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCK_TRACE_ENTRIES 5

/*
 * This is for keeping locks waiting for commit so that true dependencies
 * can be added at commit step.
 */
struct hist_lock {
	/*
	 * Id for each entry in the ring buffer. This is used to
	 * decide whether the ring buffer was overwritten or not.
	 *
	 * For example,
	 *
	 *           |<----------- hist_lock ring buffer size ------->|
	 *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
	 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
	 *
	 *           where 'p' represents an acquisition in process
	 *           context, and 'i' represents an acquisition in irq
	 *           context.
	 *
	 * In this example, the ring buffer was overwritten by
	 * acquisitions in irq context, which should be detected on
	 * rollback or commit.
	 */
	unsigned int hist_id;

	/*
	 * Separate stack_trace data. This will be used at commit step.
	 */
	struct stack_trace	trace;
	unsigned long		trace_entries[MAX_XHLOCK_TRACE_ENTRIES];

	/*
	 * Separate hlock instance. This will be used at commit step.
	 *
	 * TODO: Use a smaller data structure containing only necessary
	 * data. However, we should make lockdep code able to handle the
	 * smaller one first.
	 */
	struct held_lock	hlock;
};

/*
 * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
 * be called instead of lockdep_init_map().
 */
struct cross_lock {
	/*
	 * When more than one acquisition of a crosslock overlap, we have
	 * to perform the commit for them based on the cross_gen_id of
	 * the first acquisition, which allows us to add more true
	 * dependencies.
	 *
	 * Moreover, when no acquisition of a crosslock is in progress,
	 * we should not perform commit because the lock might not exist
	 * any more, which might cause incorrect memory access. So we
	 * have to track the number of acquisitions of a crosslock.
	 */
	int nr_acquire;

	/*
	 * Separate hlock instance. This will be used at commit step.
	 *
	 * TODO: Use a smaller data structure containing only necessary
	 * data. However, we should make lockdep code able to handle the
	 * smaller one first.
	 */
	struct held_lock	hlock;
};

struct lockdep_map_cross {
	struct lockdep_map map;
	struct cross_lock xlock;
};
#endif

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
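
/*
 * Illustrative sketch (not from the original header) of how a locking
 * primitive forwards these events; the types and helpers below are
 * hypothetical:
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		arch_do_lock(&l->raw_lock);
 *	}
 *
 *	void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 0, _RET_IP_);
 *		arch_do_unlock(&l->raw_lock);
 *	}
 */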

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(struct lockdep_map *lock, int read);

static inline int lock_is_held(struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_exclusive(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
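
/*
 * Illustrative sketch (not from the original header): a function that
 * relies on its caller holding a lock can document and enforce that
 * requirement (names hypothetical):
 *
 *	static void update_stats(struct foo *foo)
 *	{
 *		lockdep_assert_held(&foo->lock);
 *		foo->nr_updates++;
 *	}
 */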

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
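
/*
 * Illustrative sketch (not from the original header): pinning makes
 * lockdep warn if the lock is released while the caller expects it to
 * stay held across a call (scheduler-style usage, callee hypothetical):
 *
 *	struct pin_cookie cookie;
 *
 *	cookie = lockdep_pin_lock(&rq->lock);
 *	do_balance_callbacks(rq);	// must not drop rq->lock
 *	lockdep_unpin_lock(&rq->lock, cookie);
 */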

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case, since the result is not well defined and callers should
 * rather #ifdef the call sites themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
				       const char *name,
				       struct lock_class_key *key,
				       int subclass);
extern void lock_commit_crosslock(struct lockdep_map *lock);

/*
 * What we essentially have to initialize is 'nr_acquire'. Other members
 * will be initialized in add_xlock().
 */
#define STATIC_CROSS_LOCK_INIT() \
	{ .nr_acquire = 0,}

#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
	{ .map.name = (_name), .map.key = (void *)(_key), \
	  .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), .cross = 0, }

extern void crossrelease_hist_start(enum xhlock_context_t c);
extern void crossrelease_hist_end(enum xhlock_context_t c);
extern void lockdep_invariant_state(bool force);
extern void lockdep_init_task(struct task_struct *task);
extern void lockdep_free_task(struct task_struct *task);
#else /* !CROSSRELEASE */
#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#endif /* CROSSRELEASE */
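
/*
 * Illustrative sketch (not from the original header): a global "virtual"
 * lock that has no lock word, only a dependency map, can be defined
 * statically with the macro above (names hypothetical):
 *
 *	static struct lock_class_key my_map_key;
 *	static struct lockdep_map my_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_map", &my_map_key);
 */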

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
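
/*
 * Illustrative sketch (not from the original header): a lock
 * implementation wraps its slowpath with LOCK_CONTENDED() so that a
 * failed trylock is recorded as contention and the eventual acquisition
 * is timestamped (types and helpers hypothetical):
 *
 *	void my_mutex_lock(struct my_mutex *lock)
 *	{
 *		mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, my_mutex_trylock, my_mutex_slowpath);
 *	}
 */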

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * With lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
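
/*
 * Illustrative sketch (not from the original header): the classic
 * parent/child double-lock takes the second lock of the same class
 * with a nesting subclass to avoid a false deadlock report:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */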

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
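
/*
 * Illustrative sketch (not from the original header): the lock_map_*
 * helpers can annotate constructs that are not locks at all, e.g. a
 * work item's execution, so that waiting on it is validated like a
 * lock acquisition (run_the_work() is a placeholder):
 *
 *	lock_map_acquire(&work->lockdep_map);
 *	run_the_work(work);
 *	lock_map_release(&work->lockdep_map);
 */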

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
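
/*
 * Illustrative sketch (not from the original header): a function that
 * only takes a lock on its slow path can still teach lockdep about the
 * potential dependency on every call (names hypothetical):
 *
 *	void *get_cached_buf(struct foo *foo)
 *	{
 *		might_lock(&foo->lock);
 *		return likely(foo->cache) ? foo->cache : alloc_slow(foo);
 *	}
 */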

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */