linux/include/linux/lockdep.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES          8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES           (1+2*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires it very frequently with single-depth nesting.
 */
#define NR_LOCKDEP_CACHING_CLASSES      2

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
        char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
        union {
                struct hlist_node               hash_entry;
                struct lockdep_subclass_key     subkeys[MAX_LOCKDEP_SUBCLASSES];
        };
};
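
/*
 * Example (illustrative sketch): a dynamically allocated object carrying its
 * own dynamically allocated key. "struct foo" and the foo_create()/
 * foo_destroy() helpers are hypothetical; what they demonstrate is that such
 * a key must be registered via lockdep_register_key() before the first lock
 * initialization that uses it, and unregistered via lockdep_unregister_key()
 * before its memory is freed:
 *
 *      struct foo {
 *              struct mutex            lock;
 *              struct lock_class_key   key;
 *      };
 *
 *      static struct foo *foo_create(void)
 *      {
 *              struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *              if (!f)
 *                      return NULL;
 *              lockdep_register_key(&f->key);
 *              __mutex_init(&f->lock, "foo->lock", &f->key);
 *              return f;
 *      }
 *
 *      static void foo_destroy(struct foo *f)
 *      {
 *              mutex_destroy(&f->lock);
 *              lockdep_unregister_key(&f->key);
 *              kfree(f);
 *      }
 */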

extern struct lock_class_key __lockdep_no_validate__;

struct lock_trace;

#define LOCKSTAT_POINTS         4

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
        /*
         * class-hash:
         */
        struct hlist_node               hash_entry;

        /*
         * Entry in all_lock_classes when in use. Entry in free_lock_classes
         * when not in use. Instances that are being freed are on one of the
         * zapped_classes lists.
         */
        struct list_head                lock_entry;

        /*
         * These fields represent a directed graph of lock dependencies,
         * to every node we attach a list of "forward" and a list of
         * "backward" graph nodes.
         */
        struct list_head                locks_after, locks_before;

        const struct lockdep_subclass_key *key;
        unsigned int                    subclass;
        unsigned int                    dep_gen_id;

        /*
         * IRQ/softirq usage tracking bits:
         */
        unsigned long                   usage_mask;
        const struct lock_trace         *usage_traces[XXX_LOCK_USAGE_STATES];

        /*
         * Generation counter, when doing certain classes of graph walking,
         * to ensure that we check one node only once:
         */
        int                             name_version;
        const char                      *name;

#ifdef CONFIG_LOCK_STAT
        unsigned long                   contention_point[LOCKSTAT_POINTS];
        unsigned long                   contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
        s64                             min;
        s64                             max;
        s64                             total;
        unsigned long                   nr;
};

enum bounce_type {
        bounce_acquired_write,
        bounce_acquired_read,
        bounce_contended_write,
        bounce_contended_read,
        nr_bounce_types,

        bounce_acquired = bounce_acquired_write,
        bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
        unsigned long                   contention_point[LOCKSTAT_POINTS];
        unsigned long                   contending_point[LOCKSTAT_POINTS];
        struct lock_time                read_waittime;
        struct lock_time                write_waittime;
        struct lock_time                read_holdtime;
        struct lock_time                write_holdtime;
        unsigned long                   bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
        struct lock_class_key           *key;
        struct lock_class               *class_cache[NR_LOCKDEP_CACHING_CLASSES];
        const char                      *name;
#ifdef CONFIG_LOCK_STAT
        int                             cpu;
        unsigned long                   ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
                                    struct lockdep_map *from)
{
        int i;

        *to = *from;
        /*
         * Since the class cache can be modified concurrently we could observe
         * half pointers (64bit arch using 32bit copy insns). Therefore clear
         * the caches and take the performance hit.
         *
         * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
         *     that relies on cache abuse.
         */
        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
                to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
        struct list_head                entry;
        struct lock_class               *class;
        struct lock_class               *links_to;
        const struct lock_trace         *trace;
        int                             distance;

        /*
         * The parent field is used to implement breadth-first search, and the
         * bit 0 is reused to indicate if the lock has been accessed in BFS.
         */
        struct lock_list                *parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the node in the hash list of lock chains (chains that
 *               collide on the hash share one list)
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
        /* see BUILD_BUG_ON()s in add_chain_cache() */
        unsigned int                    irq_context :  2,
                                        depth       :  6,
                                        base        : 24;
        /* 4 byte hole */
        struct hlist_node               entry;
        u64                             chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS           13
#define MAX_LOCKDEP_KEYS                (1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY               -1

struct held_lock {
        /*
         * One-way hash of the dependency chain up to this point. We
         * hash the hashes step by step as the dependency chain grows.
         *
         * We use it for dependency-caching and we skip detection
         * passes and dependency-updates if there is a cache-hit, so
         * it is absolutely critical for 100% coverage of the validator
         * to have a unique key value for every unique dependency path
         * that can occur in the system, to make a unique hash value
         * as likely as possible - hence the 64-bit width.
         *
         * The task struct holds the current hash value (initialized
         * with zero), here we store the previous hash value:
         */
        u64                             prev_chain_key;
        unsigned long                   acquire_ip;
        struct lockdep_map              *instance;
        struct lockdep_map              *nest_lock;
#ifdef CONFIG_LOCK_STAT
        u64                             waittime_stamp;
        u64                             holdtime_stamp;
#endif
        /*
         * class_idx is zero-indexed; it points to the element in
         * lock_classes this held lock instance belongs to. class_idx is in
         * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
         */
        unsigned int                    class_idx:MAX_LOCKDEP_KEYS_BITS;
        /*
         * The lock-stack is unified in that the lock chains of interrupt
         * contexts nest on top of process context chains, but we 'separate'
         * the hashes by starting with 0 if we cross into an interrupt
         * context, and we also do not add cross-context lock
         * dependencies - the lock usage graph walking covers that area
         * anyway, and we'd just unnecessarily increase the number of
         * dependencies otherwise. [Note: hardirq and softirq contexts
         * are separated from each other too.]
         *
         * The following field is used to detect when we cross into an
         * interrupt context:
         */
        unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
        unsigned int trylock:1;                                         /* 16 bits */

        unsigned int read:2;        /* see lock_acquire() comment */
        unsigned int check:1;       /* see lock_acquire() comment */
        unsigned int hardirqs_off:1;
        unsigned int references:12;                                     /* 32 bits */
        unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

extern void lockdep_off(void);
extern void lockdep_on(void);

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
                             struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock's class key - for cases where special locking or
 * special initialization of locks makes the validator get the scope of
 * dependencies wrong: the default classes are either too broad (they need
 * a class-split) or too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
                lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
                lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
                lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
                lockdep_init_map(&(lock)->dep_map, #lock, \
                                 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
        lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
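
/*
 * Example (illustrative sketch): two instances of the same structure type
 * whose locks nest inside each other would share a single class by default
 * and trigger a false deadlock report. Giving one of them its own static
 * key splits the class. "struct bar", bar_nested_key and bar_init_nested()
 * are hypothetical names:
 *
 *      struct bar {
 *              spinlock_t lock;
 *      };
 *
 *      static struct lock_class_key bar_nested_key;
 *
 *      static void bar_init_nested(struct bar *b)
 *      {
 *              spin_lock_init(&b->lock);
 *              lockdep_set_class(&b->lock, &bar_nested_key);
 *      }
 */
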
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
                                    struct lock_class_key *key)
{
        return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                         int trylock, int read, int check,
                         struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);
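
/*
 * Example (illustrative sketch): how a simple exclusive-lock primitive could
 * forward acquire/release events. "my_lock_t", my_lock() and my_unlock() are
 * hypothetical; real primitives go through the spin_acquire()/mutex_acquire()
 * style wrappers defined at the end of this header. read == 0 requests an
 * exclusive acquire and check == 1 requests full validation:
 *
 *      typedef struct {
 *              arch_spinlock_t         raw;
 *              struct lockdep_map      dep_map;
 *      } my_lock_t;
 *
 *      static inline void my_lock(my_lock_t *l)
 *      {
 *              lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *              arch_spin_lock(&l->raw);
 *      }
 *
 *      static inline void my_unlock(my_lock_t *l)
 *      {
 *              lock_release(&l->dep_map, _RET_IP_);
 *              arch_spin_unlock(&l->raw);
 *      }
 */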

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
        return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)           lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)   lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
                           struct lock_class_key *key, unsigned int subclass,
                           unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
                unsigned int subclass, unsigned long ip)
{
        lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
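
/*
 * Example (illustrative sketch): pinning asserts that a lock is not released,
 * not even temporarily, until the matching unpin; if do_work() below dropped
 * the lock while it is pinned, lockdep would warn. The cookie returned by the
 * pin operation must be handed back on unpin (or repin). The my_lock_t type
 * and the my_lock()/my_unlock()/do_work() helpers are hypothetical:
 *
 *      static void my_critical_section(my_lock_t *l)
 *      {
 *              struct pin_cookie cookie;
 *
 *              my_lock(l);
 *              cookie = lock_pin_lock(&l->dep_map);
 *              do_work(l);
 *              lock_unpin_lock(&l->dep_map, cookie);
 *              my_unlock(l);
 *      }
 */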

#define lockdep_depth(tsk)      (debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)  do {                            \
                WARN_ON(debug_locks && !lockdep_is_held(l));    \
        } while (0)

#define lockdep_assert_held_write(l)    do {                    \
                WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));    \
        } while (0)

#define lockdep_assert_held_read(l)     do {                            \
                WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));    \
        } while (0)

#define lockdep_assert_held_once(l)     do {                            \
                WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));       \
        } while (0)
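
/*
 * Example (illustrative sketch): a function that relies on its caller holding
 * a lock can document and enforce that requirement. "struct foo" and
 * foo_update_locked() are hypothetical:
 *
 *      static void foo_update_locked(struct foo *f)
 *      {
 *              lockdep_assert_held(&f->lock);
 *              f->counter++;
 *      }
 */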

#define lockdep_recursing(tsk)  ((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)     lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)      do { } while (0)
# define lock_release(l, i)                     do { } while (0)
# define lock_downgrade(l, i)                   do { } while (0)
# define lock_set_class(l, n, k, s, i)          do { } while (0)
# define lock_set_subclass(l, s, i)             do { } while (0)
# define lockdep_init()                         do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)           do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
                do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
                do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)         do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather #ifdef the call themselves.
 */

# define lockdep_reset()                do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)    do { } while (0)
# define lockdep_sys_exit()                     do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

#define lockdep_depth(tsk)      (0)

#define lockdep_is_held_type(l, r)              (1)

#define lockdep_assert_held(l)                  do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)    do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)             do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)             do { (void)(l); } while (0)

#define lockdep_recursing(tsk)                  (0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)                     ({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)                do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)                do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
        XHLOCK_HARD,
        XHLOCK_SOFT,
        XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }
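
/*
 * Example (illustrative sketch): a statically initialized map for a
 * "pseudo lock" that is not backed by a real lock type, keyed by its own
 * static key. my_key and my_dep_map are hypothetical names, and such
 * definitions are usually wrapped in CONFIG_LOCKDEP by their users; see the
 * lock_map_acquire()/lock_map_release() wrappers further down for how such
 * a map is exercised:
 *
 *      static struct lock_class_key my_key;
 *      static struct lockdep_map my_dep_map =
 *              STATIC_LOCKDEP_MAP_INIT("my_dep_map", &my_key);
 */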

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)                        \
do {                                                            \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                lock(_lock);                                    \
        }                                                       \
        lock_acquired(&(_lock)->dep_map, _RET_IP_);             \
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)                 \
({                                                              \
        int ____err = 0;                                        \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                ____err = lock(_lock);                          \
        }                                                       \
        if (!____err)                                           \
                lock_acquired(&(_lock)->dep_map, _RET_IP_);     \
        ____err;                                                \
})
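
/*
 * Example (illustrative sketch): how a sleeping lock's lock operation could
 * use LOCK_CONTENDED() so that a contention event is only recorded when the
 * trylock fast path fails. "struct my_sem", my_down_write(), __my_trylock()
 * and __my_lock_slowpath() are hypothetical; the shape is modelled on the
 * down_write() pattern:
 *
 *      static void my_down_write(struct my_sem *sem)
 *      {
 *              rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
 *              LOCK_CONTENDED(sem, __my_trylock, __my_lock_slowpath);
 *      }
 */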

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
        lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * With lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING                    1
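
/*
 * Example (illustrative sketch): taking two locks of the same class, e.g. a
 * parent and a child object of the same type, in a well-defined order.
 * Annotating the inner acquisition with SINGLE_DEPTH_NESTING tells the
 * validator that this nesting is intentional. "struct foo" and foo_move()
 * are hypothetical:
 *
 *      static void foo_move(struct foo *child, struct foo *parent)
 *      {
 *              mutex_lock(&parent->lock);
 *              mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *              list_move(&child->sibling, &parent->children);
 *              mutex_unlock(&child->lock);
 *              mutex_unlock(&parent->lock);
 *      }
 */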

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)           lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)              lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)    lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)                lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)        lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)                      lock_release(l, i)

#define rwlock_acquire(l, s, t, i)              lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)         lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, i)                    lock_release(l, i)

#define seqcount_acquire(l, s, t, i)            lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)       lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)                  lock_release(l, i)

#define mutex_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)                     lock_release(l, i)

#define rwsem_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)          lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)                     lock_release(l, i)

#define lock_map_acquire(l)                     lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)                lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)             lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)                     lock_release(l, _THIS_IP_)
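
/*
 * Example (illustrative sketch): a lock-map can describe an ordering
 * constraint that is not expressed by a real lock, in the style of the
 * workqueue flush annotations. my_dep_map is the hypothetical map from the
 * STATIC_LOCKDEP_MAP_INIT() example above and run_callbacks() is a
 * hypothetical helper:
 *
 *      static void my_run_callbacks(void)
 *      {
 *              lock_map_acquire(&my_dep_map);
 *              run_callbacks();
 *              lock_map_release(&my_dep_map);
 *      }
 *
 * If a callback takes a lock that is also held around a wait for the
 * callbacks (annotated with the same map), the deadlock potential shows up
 * as a cycle through my_dep_map.
 */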

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)                                               \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
# define might_lock_read(lock)                                          \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
# define might_lock_nested(lock, subclass)                              \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,         \
                     _THIS_IP_);                                        \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
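
/*
 * Example (illustrative sketch): a function that only takes a lock on a
 * rarely travelled path can still report the dependency on every call with
 * might_lock(), so that ordering problems are found even when the slow path
 * is never hit during testing. "struct foo", foo_poll(), foo_needs_reset()
 * and foo_reset() are hypothetical:
 *
 *      static int foo_poll(struct foo *f)
 *      {
 *              might_lock(&f->lock);
 *
 *              if (unlikely(foo_needs_reset(f))) {
 *                      mutex_lock(&f->lock);
 *                      foo_reset(f);
 *                      mutex_unlock(&f->lock);
 *              }
 *              return 0;
 *      }
 */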

#define lockdep_assert_irqs_enabled()   do {                            \
                WARN_ONCE(debug_locks && !current->lockdep_recursion && \
                          !current->hardirqs_enabled,                   \
                          "IRQs not enabled as expected\n");            \
        } while (0)

#define lockdep_assert_irqs_disabled()  do {                            \
                WARN_ONCE(debug_locks && !current->lockdep_recursion && \
                          current->hardirqs_enabled,                    \
                          "IRQs not disabled as expected\n");           \
        } while (0)

#define lockdep_assert_in_irq() do {                                    \
                WARN_ONCE(debug_locks && !current->lockdep_recursion && \
                          !current->hardirq_context,                    \
                          "Not in hardirq as expected\n");              \
        } while (0)
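
/*
 * Example (illustrative sketch): documenting an interrupt-state requirement
 * instead of sprinkling unconditional checks. "struct foo", foo_add_pending()
 * and foo_pending_list are hypothetical:
 *
 *      static void foo_add_pending(struct foo *f)
 *      {
 *              lockdep_assert_irqs_disabled();
 *              list_add_tail(&f->entry, &foo_pending_list);
 *      }
 */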

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */