linux/kernel/locking/lockdep.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * This code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if at any time in the past two locks were taken in a different
 * order, even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
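
/*
 * Illustrative example (hypothetical lock names, not part of this file):
 * the classic ABBA inversion that the validator reports even if the two
 * paths never actually race with each other:
 *
 *	static DEFINE_SPINLOCK(lock_a);
 *	static DEFINE_SPINLOCK(lock_b);
 *
 *	void path_one(void)
 *	{
 *		spin_lock(&lock_a);
 *		spin_lock(&lock_b);	// records lock_a -> lock_b
 *		spin_unlock(&lock_b);
 *		spin_unlock(&lock_a);
 *	}
 *
 *	void path_two(void)
 *	{
 *		spin_lock(&lock_b);
 *		spin_lock(&lock_a);	// records lock_b -> lock_a: cycle
 *		spin_unlock(&lock_a);
 *		spin_unlock(&lock_b);
 *	}
 *
 * Once both orders have been observed - in any task, at any point in the
 * past - the dependency graph contains a cycle and a circular locking
 * dependency warning is emitted.
 */
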
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>
#include <linux/lockdep.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

DEFINE_PER_CPU(unsigned int, lockdep_recursion);
EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);

static __always_inline bool lockdep_enabled(void)
{
	if (!debug_locks)
		return false;

	if (this_cpu_read(lockdep_recursion))
		return false;

	if (current->lockdep_recursion)
		return false;

	return true;
}

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *__owner;

static inline void lockdep_lock(void)
{
	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	__this_cpu_inc(lockdep_recursion);
	arch_spin_lock(&__lock);
	__owner = current;
}

static inline void lockdep_unlock(void)
{
	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
		return;

	__owner = NULL;
	arch_spin_unlock(&__lock);
	__this_cpu_dec(lockdep_recursion);
}
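
/*
 * Typical usage sketch (illustrative): the graph lock must be taken with
 * interrupts disabled, and the lockdep_recursion counter keeps the
 * validator from recursing into itself, as graph_lock() and
 * lockdep_register_key() do below:
 *
 *	unsigned long flags;
 *
 *	raw_local_irq_save(flags);
 *	lockdep_lock();
 *	... walk or modify the dependency graph ...
 *	lockdep_unlock();
 *	raw_local_irq_restore(flags);
 */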

static inline bool lockdep_assert_locked(void)
{
	return DEBUG_LOCKS_WARN_ON(__owner != current);
}

static struct task_struct *lockdep_selftest_task_struct;


static int graph_lock(void)
{
	lockdep_lock();
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		lockdep_unlock();
		return 0;
	}
	return 1;
}

static inline void graph_unlock(void)
{
	lockdep_unlock();
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	lockdep_unlock();

	return ret;
}

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);

/*
 * All data structures here are protected by the global lockdep graph lock.
 *
 * nr_lock_classes is the number of elements of lock_classes[] that are
 * in use.
 */
#define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
unsigned long nr_lock_classes;
unsigned long nr_zapped_classes;
#ifndef CONFIG_DEBUG_LOCKDEP
static
#endif
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	unsigned int class_idx = hlock->class_idx;

	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
	barrier();

	if (!test_bit(class_idx, lock_classes_in_use)) {
		/*
		 * Someone passed in garbage, we give up.
		 */
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}

	/*
	 * At this point, if the passed hlock->class_idx is still garbage,
	 * we just have to live with it.
	 */
	return lock_classes + class_idx;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
	return local_clock();
}

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}

static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}

struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif

/*
 * We keep a global list of all lock classes. The list is only accessed
 * with the lockdep spinlock held. free_lock_classes is a list with free
 * elements. These elements are linked together by the lock_entry member
 * in struct lock_class.
 */
LIST_HEAD(all_lock_classes);
static LIST_HEAD(free_lock_classes);

/**
 * struct pending_free - information about data structures about to be freed
 * @zapped: Head of a list with struct lock_class elements.
 * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
 *	are about to be freed.
 */
struct pending_free {
	struct list_head zapped;
	DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
};

/**
 * struct delayed_free - data structures used for delayed freeing
 *
 * A data structure for delayed freeing of data structures that may be
 * accessed by RCU readers at the time these were freed.
 *
 * @rcu_head:  Used to schedule an RCU callback for freeing data structures.
 * @index:     Index of @pf to which freed data structures are added.
 * @scheduled: Whether or not an RCU callback has been scheduled.
 * @pf:        Array with information about data structures about to be freed.
 */
static struct delayed_free {
	struct rcu_head		rcu_head;
	int			index;
	int			scheduled;
	struct pending_free	pf[2];
} delayed_free;

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct hlist_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct hlist_head chainhash_table[CHAINHASH_SIZE];

/*
 * The ID of a held_lock: the class index plus the lock's read state,
 * folded into the bits above MAX_LOCKDEP_KEYS_BITS.
 */
static inline u16 hlock_id(struct held_lock *hlock)
{
	BUILD_BUG_ON(MAX_LOCKDEP_KEYS_BITS + 2 > 16);

	return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
}

static inline unsigned int chain_hlock_class_idx(u16 hlock_id)
{
	return hlock_id & (MAX_LOCKDEP_KEYS - 1);
}
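
/*
 * Illustrative example (assuming the default MAX_LOCKDEP_KEYS_BITS of 13):
 * a held_lock with class_idx == 42 and read == 2 yields
 *
 *	hlock_id == 42 | (2 << 13) == 0x402a
 *
 * and chain_hlock_class_idx(0x402a) recovers 42 by masking with
 * MAX_LOCKDEP_KEYS - 1.
 */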

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
	u32 k0 = key, k1 = key >> 32;

	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */

	return k0 | (u64)k1 << 32;
}
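
/*
 * Illustrative sketch: the chain key for held locks A, B, C is built up
 * incrementally, the same way check_lock_chain_key() recomputes it:
 *
 *	u64 key = INITIAL_CHAIN_KEY;
 *
 *	key = iterate_chain_key(key, hlock_id(A));
 *	key = iterate_chain_key(key, hlock_id(B));
 *	key = iterate_chain_key(key, hlock_id(C));
 *
 * Taking the same classes in a different order yields a different key,
 * which is what makes the cached lock chains order-sensitive.
 */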

void lockdep_init_task(struct task_struct *task)
{
	task->lockdep_depth = 0; /* no locks held yet */
	task->curr_chain_key = INITIAL_CHAIN_KEY;
	task->lockdep_recursion = 0;
}

static __always_inline void lockdep_recursion_inc(void)
{
	__this_cpu_inc(lockdep_recursion);
}

static __always_inline void lockdep_recursion_finish(void)
{
	if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
		__this_cpu_write(lockdep_recursion, 0);
}

void lockdep_set_selftest_task(struct task_struct *task)
{
	lockdep_selftest_task_struct = task;
}

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. Returning 1 here would allow everything. */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static void print_lockdep_off(const char *bug_msg)
{
	printk(KERN_DEBUG "%s\n", bug_msg);
	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}

unsigned long nr_stack_trace_entries;

#ifdef CONFIG_PROVE_LOCKING
/**
 * struct lock_trace - single stack backtrace
 * @hash_entry: Entry in a stack_trace_hash[] list.
 * @hash:       jhash() of @entries.
 * @nr_entries: Number of entries in @entries.
 * @entries:    Actual stack backtrace.
 */
struct lock_trace {
	struct hlist_node	hash_entry;
	u32			hash;
	u32			nr_entries;
	unsigned long		entries[] __aligned(sizeof(unsigned long));
};
#define LOCK_TRACE_SIZE_IN_LONGS				\
	(sizeof(struct lock_trace) / sizeof(unsigned long))
/*
 * Stack-trace: sequence of lock_trace structures. Protected by the graph_lock.
 */
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];

static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
{
	return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
		memcmp(t1->entries, t2->entries,
		       t1->nr_entries * sizeof(t1->entries[0])) == 0;
}

static struct lock_trace *save_trace(void)
{
	struct lock_trace *trace, *t2;
	struct hlist_head *hash_head;
	u32 hash;
	int max_entries;

	BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
	BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);

	trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
		LOCK_TRACE_SIZE_IN_LONGS;

	if (max_entries <= 0) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
		dump_stack();

		return NULL;
	}
	trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);

	hash = jhash(trace->entries, trace->nr_entries *
		     sizeof(trace->entries[0]), 0);
	trace->hash = hash;
	hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
	hlist_for_each_entry(t2, hash_head, hash_entry) {
		if (traces_identical(trace, t2))
			return t2;
	}
	nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
	hlist_add_head(&trace->hash_entry, hash_head);

	return trace;
}
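
/*
 * Illustrative sketch of the stack_trace[] layout: traces are carved out
 * of the array sequentially and deduplicated via stack_trace_hash, so for
 * example
 *
 *	save_trace();	// new trace T1, stored at stack_trace[0]
 *	save_trace();	// identical entries: returns T1, stores nothing
 *	save_trace();	// different entries: new trace T2, placed after T1
 *
 * consumes LOCK_TRACE_SIZE_IN_LONGS + nr_entries longs per unique trace
 * only.
 */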

/* Return the number of stack traces in the stack_trace[] array. */
u64 lockdep_stack_trace_count(void)
{
	struct lock_trace *trace;
	u64 c = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
		hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
			c++;
		}
	}

	return c;
}

/* Return the number of stack hash chains that have at least one stack trace. */
u64 lockdep_stack_hash_count(void)
{
	u64 c = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
		if (!hlist_empty(&stack_trace_hash[i]))
			c++;

	return c;
}
#endif

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

#ifdef CONFIG_PROVE_LOCKING
/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",	\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
	[LOCK_USED_READ] = "INITIAL READ USE",
	/* abused as string storage for verify_lock_unused() */
	[LOCK_USAGE_STATES] = "IN-NMI",
};
#endif

const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}

static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	/*
	 * The usage character defaults to '.' (i.e., irqs disabled and not in
	 * irq context), which is the safest usage category.
	 */
	char c = '.';

	/*
	 * The order of the following usage checks matters, which will
	 * result in the outcome character as follows:
	 *
	 * - '+': irq is enabled and not in irq context
	 * - '-': in irq context and irq is disabled
	 * - '?': in irq context and irq is enabled
	 */
	if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
		c = '+';
		if (class->usage_mask & lock_flag(bit))
			c = '?';
	} else if (class->usage_mask & lock_flag(bit))
		c = '-';

	return c;
}

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE)						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}

static void __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(KERN_CONT "%s", name);
	} else {
		printk(KERN_CONT "%s", name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		if (class->subclass)
			printk(KERN_CONT "/%d", class->subclass);
	}
}

static void print_lock_name(struct lock_class *class)
{
	char usage[LOCK_USAGE_CHARS];

	get_usage_chars(class, usage);

	printk(KERN_CONT " (");
	__print_lock_name(class);
	printk(KERN_CONT "){%s}-{%d:%d}", usage,
			class->wait_type_outer ?: class->wait_type_inner,
			class->wait_type_inner);
}
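
/*
 * Example output (illustrative): for a runqueue lock used in hardirq
 * context this prints something like
 *
 *	 (&rq->lock){-.-.}-{2:2}
 *
 * where the usage characters come from get_usage_chars() (one W/R pair
 * per LOCKDEP_STATE) and the trailing {%d:%d} pair is the (outer, inner)
 * wait type.
 */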

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk(KERN_CONT "%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	/*
	 * We can be called locklessly through debug_show_all_locks() so be
	 * extra careful, the hlock might have been released and cleared.
	 *
	 * If this indeed happens, let's pretend it does not hurt to continue
	 * to print the lock unless the hlock class_idx does not point to a
	 * registered class. The rationale here is: since we don't attempt
	 * to distinguish whether we are in this situation, if it just
	 * happened we can't count on class_idx to tell either.
	 */
	struct lock_class *lock = hlock_class(hlock);

	if (!lock) {
		printk(KERN_CONT "<RELEASED>\n");
		return;
	}

	printk(KERN_CONT "%px", hlock->instance);
	print_lock_name(lock);
	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *p)
{
	int i, depth = READ_ONCE(p->lockdep_depth);

	if (!depth)
		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
	else
		printk("%d lock%s held by %s/%d:\n", depth,
		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
	/*
	 * It's not reliable to print a task's held locks if it's not sleeping
	 * and it's not the current task.
	 */
	if (p != current && task_is_running(p))
		return;
	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(p->held_locks + i);
	}
}

static void print_kernel_ident(void)
{
	printk("%s %.*s %s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version,
		print_tainted());
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
#ifdef __KERNEL__
/*
 * Check if an address is part of freed initmem. After initmem is freed,
 * memory can be allocated from it, and such allocations would then have
 * addresses within the range [_stext, _end].
 */
#ifndef arch_is_kernel_initmem_freed
static int arch_is_kernel_initmem_freed(unsigned long addr)
{
	if (system_state < SYSTEM_FREEING_INITMEM)
		return 0;

	return init_section_contains((void *)addr, 1);
}
#endif

static int static_obj(const void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	if (arch_is_kernel_initmem_freed(addr))
		return 0;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}
#endif

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter. The caller must hold the graph
 * lock.
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}
/* used from NMI context -- must be lockless */
static noinstr struct lock_class *
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		instrumentation_begin();
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		instrumentation_end();
		return NULL;
	}

	/*
	 * If it is not initialised then it has never been locked,
	 * so it won't be present in the hash table.
	 */
	if (unlikely(!lock->key))
		return NULL;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We do an RCU walk of the hash, see lockdep_free_key_range().
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return NULL;

	hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) {
		if (class->key == key) {
			/*
			 * Huh! same key, different name? Did someone trample
			 * on some memory? We're most confused.
			 */
			WARN_ON_ONCE(class->name != lock->name &&
				     lock->key != &__lockdep_no_validate__);
			return class;
		}
	}

	return NULL;
}

/*
 * Static locks do not have their class-keys yet - for them the key is
 * the lock object itself. If the lock is in the per cpu area, the
 * canonical address of the lock (per cpu offset removed) is used.
 */
static bool assign_lock_key(struct lockdep_map *lock)
{
	unsigned long can_addr, addr = (unsigned long)lock;

#ifdef __KERNEL__
	/*
	 * lockdep_free_key_range() assumes that struct lock_class_key
	 * objects do not overlap. Since we use the address of lock
	 * objects as class key for static objects, check whether the
	 * size of lock_class_key objects does not exceed the size of
	 * the smallest lock object.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
#endif

	if (__is_kernel_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (__is_module_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (static_obj(lock))
		lock->key = (void *)lock;
	else {
		/* Debug-check: all keys must be persistent! */
		debug_locks_off();
		pr_err("INFO: trying to register non-static key.\n");
		pr_err("The code is fine but needs lockdep annotation, or maybe\n");
		pr_err("you didn't initialize this object before use?\n");
		pr_err("turning off the locking correctness validator.\n");
		dump_stack();
		return false;
	}

	return true;
}
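
/*
 * Illustrative sketch (hypothetical names): assign_lock_key() only runs
 * for locks that still have no class key, i.e. locks never passed through
 * an init function. A heap-allocated lock that is merely zeroed:
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *	spin_lock(&f->lock);	// no key and not a static object
 *
 * lands in the "trying to register non-static key" branch above, whereas
 * spin_lock_init(&f->lock) would have attached the static lock_class_key
 * that the init macro defines at its call site.
 */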

#ifdef CONFIG_DEBUG_LOCKDEP

/* Check whether element @e occurs in list @h */
static bool in_list(struct list_head *e, struct list_head *h)
{
	struct list_head *f;

	list_for_each(f, h) {
		if (e == f)
			return true;
	}

	return false;
}

/*
 * Check whether entry @e occurs in any of the locks_after or locks_before
 * lists.
 */
static bool in_any_class_list(struct list_head *e)
{
	struct lock_class *class;
	int i;

	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		class = &lock_classes[i];
		if (in_list(e, &class->locks_after) ||
		    in_list(e, &class->locks_before))
			return true;
	}
	return false;
}

static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
{
	struct lock_list *e;

	list_for_each_entry(e, h, entry) {
		if (e->links_to != c) {
			printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
			       c->name ? : "(?)",
			       (unsigned long)(e - list_entries),
			       e->links_to && e->links_to->name ?
			       e->links_to->name : "(?)",
			       e->class && e->class->name ? e->class->name :
			       "(?)");
			return false;
		}
	}
	return true;
}

#ifdef CONFIG_PROVE_LOCKING
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
#endif

static bool check_lock_chain_key(struct lock_chain *chain)
{
#ifdef CONFIG_PROVE_LOCKING
	u64 chain_key = INITIAL_CHAIN_KEY;
	int i;

	for (i = chain->base; i < chain->base + chain->depth; i++)
		chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
	/*
	 * The 'unsigned long long' casts avoid a compiler warning when
	 * building tools/lib/lockdep.
	 */
	if (chain->chain_key != chain_key) {
		printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
		       (unsigned long long)(chain - lock_chains),
		       (unsigned long long)chain->chain_key,
		       (unsigned long long)chain_key);
		return false;
	}
#endif
	return true;
}

static bool in_any_zapped_class_list(struct lock_class *class)
{
	struct pending_free *pf;
	int i;

	for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
		if (in_list(&class->lock_entry, &pf->zapped))
			return true;
	}

	return false;
}

static bool __check_data_structures(void)
{
	struct lock_class *class;
	struct lock_chain *chain;
	struct hlist_head *head;
	struct lock_list *e;
	int i;

	/* Check whether all classes occur in a lock list. */
	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		class = &lock_classes[i];
		if (!in_list(&class->lock_entry, &all_lock_classes) &&
		    !in_list(&class->lock_entry, &free_lock_classes) &&
		    !in_any_zapped_class_list(class)) {
			printk(KERN_INFO "class %px/%s is not in any class list\n",
			       class, class->name ? : "(?)");
			return false;
		}
	}

	/* Check whether all classes have valid lock lists. */
	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		class = &lock_classes[i];
		if (!class_lock_list_valid(class, &class->locks_before))
			return false;
		if (!class_lock_list_valid(class, &class->locks_after))
			return false;
	}

	/* Check the chain_key of all lock chains. */
	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
		head = chainhash_table + i;
		hlist_for_each_entry_rcu(chain, head, entry) {
			if (!check_lock_chain_key(chain))
				return false;
		}
	}

	/*
	 * Check whether all list entries that are in use occur in a class
	 * lock list.
	 */
	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
		e = list_entries + i;
		if (!in_any_class_list(&e->entry)) {
			printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
			       (unsigned int)(e - list_entries),
			       e->class->name ? : "(?)",
			       e->links_to->name ? : "(?)");
			return false;
		}
	}

	/*
	 * Check whether all list entries that are not in use do not occur in
	 * a class lock list.
	 */
	for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
		e = list_entries + i;
		if (in_any_class_list(&e->entry)) {
			printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
			       (unsigned int)(e - list_entries),
			       e->class && e->class->name ? e->class->name :
			       "(?)",
			       e->links_to && e->links_to->name ?
			       e->links_to->name : "(?)");
			return false;
		}
	}

	return true;
}

int check_consistency = 0;
module_param(check_consistency, int, 0644);

static void check_data_structures(void)
{
	static bool once = false;

	if (check_consistency && !once) {
		if (!__check_data_structures()) {
			once = true;
			WARN_ON(once);
		}
	}
}

#else /* CONFIG_DEBUG_LOCKDEP */

static inline void check_data_structures(void) { }

#endif /* CONFIG_DEBUG_LOCKDEP */

static void init_chain_block_buckets(void);

/*
 * Initialize the lock_classes[] array elements, the free_lock_classes list
 * and also the delayed_free structure.
 */
static void init_data_structures_once(void)
{
	static bool __read_mostly ds_initialized, rcu_head_initialized;
	int i;

	if (likely(rcu_head_initialized))
		return;

	if (system_state >= SYSTEM_SCHEDULING) {
		init_rcu_head(&delayed_free.rcu_head);
		rcu_head_initialized = true;
	}

	if (ds_initialized)
		return;

	ds_initialized = true;

	INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
	INIT_LIST_HEAD(&delayed_free.pf[1].zapped);

	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
		INIT_LIST_HEAD(&lock_classes[i].locks_after);
		INIT_LIST_HEAD(&lock_classes[i].locks_before);
	}
	init_chain_block_buckets();
}

static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
{
	unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);

	return lock_keys_hash + hash;
}

/* Register a dynamically allocated key. */
void lockdep_register_key(struct lock_class_key *key)
{
	struct hlist_head *hash_head;
	struct lock_class_key *k;
	unsigned long flags;

	if (WARN_ON_ONCE(static_obj(key)))
		return;
	hash_head = keyhashentry(key);

	raw_local_irq_save(flags);
	if (!graph_lock())
		goto restore_irqs;
	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
		if (WARN_ON_ONCE(k == key))
			goto out_unlock;
	}
	hlist_add_head_rcu(&key->hash_entry, hash_head);
out_unlock:
	graph_unlock();
restore_irqs:
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lockdep_register_key);
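
/*
 * Usage sketch (hypothetical names): a key embedded in dynamically
 * allocated storage must be registered before first use and unregistered
 * again before the memory is freed:
 *
 *	struct foo {
 *		struct lock_class_key key;
 *		spinlock_t lock;
 *	};
 *
 *	f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	lockdep_register_key(&f->key);
 *	spin_lock_init(&f->lock);
 *	lockdep_set_class(&f->lock, &f->key);
 *	...
 *	lockdep_unregister_key(&f->key);
 *	kfree(f);
 */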

/* Check whether a key has been registered as a dynamic key. */
static bool is_dynamic_key(const struct lock_class_key *key)
{
	struct hlist_head *hash_head;
	struct lock_class_key *k;
	bool found = false;

	if (WARN_ON_ONCE(static_obj(key)))
		return false;

	/*
	 * If lock debugging is disabled lock_keys_hash[] may contain
	 * pointers to memory that has already been freed. Avoid triggering
	 * a use-after-free in that case by returning early.
	 */
	if (!debug_locks)
		return true;

	hash_head = keyhashentry(key);

	rcu_read_lock();
	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
		if (k == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so the actual hash lookup happens at most once per lock object.
 */
static struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		goto out_set_class_cache;

	if (!lock->key) {
		if (!assign_lock_key(lock))
			return NULL;
	} else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	if (!graph_lock()) {
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key)
			goto out_unlock_set;
	}

	init_data_structures_once();

	/* Allocate a new lock class and add it to the hash. */
	class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
					 lock_entry);
	if (!class) {
		if (!debug_locks_off_graph_unlock()) {
			return NULL;
		}

		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
		dump_stack();
		return NULL;
	}
	nr_lock_classes++;
	__set_bit(class - lock_classes, lock_classes_in_use);
	debug_atomic_inc(nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	WARN_ON_ONCE(!list_empty(&class->locks_before));
	WARN_ON_ONCE(!list_empty(&class->locks_after));
	class->name_version = count_matching_names(class);
	class->wait_type_inner = lock->wait_type_inner;
	class->wait_type_outer = lock->wait_type_outer;
	class->lock_type = lock->lock_type;
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	hlist_add_head_rcu(&class->hash_entry, hash_head);
	/*
	 * Remove the class from the free list and add it to the global list
	 * of classes.
	 */
	list_move_tail(&class->lock_entry, &all_lock_classes);

	if (verbose(class)) {
		graph_unlock();

		printk("\nnew class %px: %s", class->key, class->name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		printk(KERN_CONT "\n");
		dump_stack();

		if (!graph_lock()) {
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();

out_set_class_cache:
	if (!subclass || force)
		lock->class_cache[0] = class;
	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		lock->class_cache[subclass] = class;

	/*
	 * Hash collision, did we smoke some? We found a class with a matching
	 * hash but the subclass -- which is hashed in -- didn't match.
	 */
	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (Assumes the graph_lock is held; returns
 * NULL on failure.)
 */
static struct lock_list *alloc_list_entry(void)
{
	int idx = find_first_zero_bit(list_entries_in_use,
				      ARRAY_SIZE(list_entries));

	if (idx >= ARRAY_SIZE(list_entries)) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
		dump_stack();
		return NULL;
	}
	nr_list_entries++;
	__set_bit(idx, list_entries_in_use);
	return list_entries + idx;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *this,
			    struct lock_class *links_to, struct list_head *head,
			    unsigned long ip, u16 distance, u8 dep,
			    const struct lock_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->links_to = links_to;
	entry->dep = dep;
	entry->distance = distance;
	entry->trace = trace;
	/*
	 * Both allocation and removal are done under the graph lock; but
	 * iteration is under RCU-sched; see look_up_lock_class() and
	 * lockdep_free_key_range().
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}

/*
 * The queue size is a power of 2 so that the modulo needed for
 * wrap-around reduces to masking with CQ_MASK:
 */
#define MAX_CIRCULAR_QUEUE_SIZE		(1UL << CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS)
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and helpers are used to implement the graph
 * breadth-first search (BFS) algorithm, by which we can determine
 * whether there is a path from one lock to another. In deadlock checks,
 * a path from the next lock to be acquired to a previous held lock
 * indicates that adding the <prev> -> <next> lock dependency will
 * produce a circle in the graph. Breadth-first search instead of
 * depth-first search is used in order to find the shortest (circular)
 * path.
 */
struct circular_queue {
	struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

/*
 * Dequeue an element from the circular_queue, returning a lock_list if
 * the queue is not empty, or NULL otherwise.
 */
static inline struct lock_list *__cq_dequeue(struct circular_queue *cq)
{
	struct lock_list *lock;

	if (__cq_empty(cq))
		return NULL;

	lock = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;

	return lock;
}

static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}
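
/*
 * Illustrative example: with MAX_CIRCULAR_QUEUE_SIZE == 8 (CQ_MASK == 7),
 * a queue with front == 6 and rear == 1 holds the elements at indices
 * 6, 7 and 0, and indeed
 *
 *	__cq_get_elem_count() == (1 - 6) & 7 == 3
 *
 * Note that __cq_full() deliberately wastes one slot: the queue counts as
 * full when ((rear + 1) & CQ_MASK) == front, which keeps it
 * distinguishable from the empty case front == rear.
 */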

static inline void mark_lock_accessed(struct lock_list *lock)
{
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline void visit_lock_entry(struct lock_list *lock,
				    struct lock_list *parent)
{
	lock->parent = parent;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}

/*
 * Return the forward or backward dependency list.
 *
 * @lock:   the lock_list to get its class's dependency list
 * @offset: the offset to struct lock_class to determine whether it is
 *          locks_after or locks_before
 */
static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
{
	void *lock_class = lock->class;

	return lock_class + offset;
}
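
/*
 * Illustrative sketch: callers select the search direction by passing the
 * field offset, e.g.
 *
 *	get_dep_list(lock, offsetof(struct lock_class, locks_after))
 *
 * for a forward search and offsetof(struct lock_class, locks_before) for
 * a backward one, which lets __bfs() share one implementation for both
 * directions.
 */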

/*
 * Return values of a bfs search:
 *
 * BFS_E* indicates an error
 * BFS_R* indicates a result (match or not)
 *
 * BFS_EINVALIDNODE: Found an invalid node in the graph.
 *
 * BFS_EQUEUEFULL: The queue is full while doing the bfs.
 *
 * BFS_RMATCH: Found the matched node in the graph, and put that node into
 *             *@target_entry.
 *
 * BFS_RNOMATCH: Haven't found a matched node, leaving *@target_entry
 *               _unchanged_.
 */
enum bfs_result {
	BFS_EINVALIDNODE = -2,
	BFS_EQUEUEFULL = -1,
	BFS_RMATCH = 0,
	BFS_RNOMATCH = 1,
};

/*
 * bfs_result < 0 means error
 */
static inline bool bfs_error(enum bfs_result res)
{
	return res < 0;
}

/*
 * DEP_*_BIT in lock_list::dep
 *
 * For dependency @prev -> @next:
 *
 *   SR: @prev is shared reader (->read != 0) and @next is recursive reader
 *       (->read == 2)
 *   ER: @prev is exclusive locker (->read == 0) and @next is recursive reader
 *   SN: @prev is shared reader and @next is non-recursive locker (->read != 2)
 *   EN: @prev is exclusive locker and @next is non-recursive locker
 *
 * Note that we define the value of DEP_*_BITs so that:
 *   bit0 is prev->read == 0
 *   bit1 is next->read != 2
 */
#define DEP_SR_BIT (0 + (0 << 1)) /* 0 */
#define DEP_ER_BIT (1 + (0 << 1)) /* 1 */
#define DEP_SN_BIT (0 + (1 << 1)) /* 2 */
#define DEP_EN_BIT (1 + (1 << 1)) /* 3 */

#define DEP_SR_MASK (1U << (DEP_SR_BIT))
#define DEP_ER_MASK (1U << (DEP_ER_BIT))
#define DEP_SN_MASK (1U << (DEP_SN_BIT))
#define DEP_EN_MASK (1U << (DEP_EN_BIT))

static inline unsigned int
__calc_dep_bit(struct held_lock *prev, struct held_lock *next)
{
	return (prev->read == 0) + ((next->read != 2) << 1);
}

static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next)
{
	return 1U << __calc_dep_bit(prev, next);
}
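
/*
 * Illustrative example: for @prev acquired as an exclusive writer
 * (prev->read == 0) and @next acquired as a recursive reader
 * (next->read == 2):
 *
 *	__calc_dep_bit() == 1 + (0 << 1) == DEP_ER_BIT
 *	calc_dep()       == DEP_ER_MASK
 *
 * i.e. the dependency is recorded as -(ER)->.
 */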

/*
 * calculate the dep_bit for backwards edges. We care about whether @prev is
 * shared and whether @next is recursive.
 */
static inline unsigned int
__calc_dep_bitb(struct held_lock *prev, struct held_lock *next)
{
	return (next->read != 2) + ((prev->read == 0) << 1);
}

static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next)
{
	return 1U << __calc_dep_bitb(prev, next);
}

/*
 * Initialize a lock_list entry @lock belonging to @class as the root for a BFS
 * search.
 */
static inline void __bfs_init_root(struct lock_list *lock,
				   struct lock_class *class)
{
	lock->class = class;
	lock->parent = NULL;
	lock->only_xr = 0;
}

/*
 * Initialize a lock_list entry @lock based on a lock acquisition @hlock as the
 * root for a BFS search.
 *
 * ->only_xr of the initial lock node is set to @hlock->read == 2, to make sure
 * that <prev> -> @hlock and @hlock -> <whatever __bfs() found> is not -(*R)->
 * and -(S*)->.
 */
static inline void bfs_init_root(struct lock_list *lock,
				 struct held_lock *hlock)
{
	__bfs_init_root(lock, hlock_class(hlock));
	lock->only_xr = (hlock->read == 2);
}

/*
 * Similar to bfs_init_root() but initialize the root for backwards BFS.
 *
 * ->only_xr of the initial lock node is set to @hlock->read != 0, to make sure
 * that <next> -> @hlock and @hlock -> <whatever backwards BFS found> is not
 * -(*S)-> and -(R*)-> (reverse order of -(*R)-> and -(S*)->).
 */
static inline void bfs_init_rootb(struct lock_list *lock,
				  struct held_lock *hlock)
{
	__bfs_init_root(lock, hlock_class(hlock));
	lock->only_xr = (hlock->read != 0);
}

static inline struct lock_list *__bfs_next(struct lock_list *lock, int offset)
{
	if (!lock || !lock->parent)
		return NULL;

	return list_next_or_null_rcu(get_dep_list(lock->parent, offset),
				     &lock->entry, struct lock_list, entry);
}
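
/*
 * Illustrative example of a strong path: the walk
 *
 *	A -(EN)-> B -(ER)-> C
 *
 * is strong, but extending it with C -(SN)-> D is not, because the
 * -(*R)-> step into C followed by the -(S*)-> step out of C cannot
 * block: C is only reached as a recursive reader and only left by a
 * shared reader, and those read acquisitions do not block each other.
 * __bfs() prunes such extensions via ->only_xr.
 */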
1660
1661/*
1662 * Breadth-First Search to find a strong path in the dependency graph.
1663 *
1664 * @source_entry: the source of the path we are searching for.
1665 * @data: data used for the second parameter of @match function
1666 * @match: match function for the search
1667 * @target_entry: pointer to the target of a matched path
1668 * @offset: the offset to struct lock_class to determine whether it is
1669 *          locks_after or locks_before
1670 *
1671 * We may have multiple edges (considering different kinds of dependencies,
1672 * e.g. ER and SN) between two nodes in the dependency graph. But
1673 * only the strong dependency path in the graph is relevant to deadlocks. A
1674 * strong dependency path is a dependency path that doesn't have two adjacent
1675 * dependencies as -(*R)-> -(S*)->, please see:
1676 *
1677 *         Documentation/locking/lockdep-design.rst
1678 *
1679 * for more explanation of the definition of strong dependency paths
1680 *
1681 * In __bfs(), we only traverse in the strong dependency path:
1682 *
1683 *     In lock_list::only_xr, we record whether the previous dependency only
1684 *     has -(*R)-> in the search, and if it does (prev only has -(*R)->), we
1685 *     filter out any -(S*)-> in the current dependency and after that, the
1686 *     ->only_xr is set according to whether we only have -(*R)-> left.
1687 */
1688static enum bfs_result __bfs(struct lock_list *source_entry,
1689                             void *data,
1690                             bool (*match)(struct lock_list *entry, void *data),
1691                             bool (*skip)(struct lock_list *entry, void *data),
1692                             struct lock_list **target_entry,
1693                             int offset)
1694{
1695        struct circular_queue *cq = &lock_cq;
1696        struct lock_list *lock = NULL;
1697        struct lock_list *entry;
1698        struct list_head *head;
1699        unsigned int cq_depth;
1700        bool first;
1701
1702        lockdep_assert_locked();
1703
1704        __cq_init(cq);
1705        __cq_enqueue(cq, source_entry);
1706
1707        while ((lock = __bfs_next(lock, offset)) || (lock = __cq_dequeue(cq))) {
1708                if (!lock->class)
1709                        return BFS_EINVALIDNODE;
1710
1711                /*
1712                 * Step 1: check whether we have already finished on this one.
1713                 *
1714                 * If we have visited all the dependencies from this @lock to
1715                 * others (iow, if we have visited all lock_list entries in
1716                 * @lock->class->locks_{after,before}) we skip, otherwise go
1717                 * and visit all the dependencies in the list and mark this
1718                 * list accessed.
1719                 */
1720                if (lock_accessed(lock))
1721                        continue;
1722                else
1723                        mark_lock_accessed(lock);
1724
1725                /*
1726                 * Step 2: check whether prev dependency and this form a strong
1727                 *         dependency path.
1728                 */
1729                if (lock->parent) { /* Parent exists, check prev dependency */
1730                        u8 dep = lock->dep;
1731                        bool prev_only_xr = lock->parent->only_xr;
1732
1733                        /*
1734                         * Mask out all -(S*)-> if we only have *R in previous
1735                         * step, because -(*R)-> -(S*)-> don't make up a strong
1736                         * dependency.
1737                         */
1738                        if (prev_only_xr)
1739                                dep &= ~(DEP_SR_MASK | DEP_SN_MASK);
1740
1741                        /* If nothing left, we skip */
1742                        if (!dep)
1743                                continue;
1744
1745                        /* If there are only -(*R)-> left, set that for the next step */
1746                        lock->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
1747                }
1748
1749                /*
1750                 * Step 3: we haven't visited this and there is a strong
1751                 *         dependency path to this, so check with @match.
1752                 *         If @skip is provided and returns true, we skip this
1753                 *         lock (and any path this lock is in).
1754                 */
1755                if (skip && skip(lock, data))
1756                        continue;
1757
1758                if (match(lock, data)) {
1759                        *target_entry = lock;
1760                        return BFS_RMATCH;
1761                }
1762
1763                /*
1764                 * Step 4: if there is no match, expand the path by adding the
1765                 *         forward or backwards dependencies in the search
1766                 *
1767                 */
1768                first = true;
1769                head = get_dep_list(lock, offset);
1770                list_for_each_entry_rcu(entry, head, entry) {
1771                        visit_lock_entry(entry, lock);
1772
1773                        /*
1774                         * Note we only enqueue the first entry of the list into
1775                         * the queue, because we can always find a sibling
1776                         * dependency from it (see __bfs_next()); as a result,
1777                         * queue space is saved.
1778                         */
1779                        if (!first)
1780                                continue;
1781
1782                        first = false;
1783
1784                        if (__cq_enqueue(cq, entry))
1785                                return BFS_EQUEUEFULL;
1786
1787                        cq_depth = __cq_get_elem_count(cq);
1788                        if (max_bfs_queue_depth < cq_depth)
1789                                max_bfs_queue_depth = cq_depth;
1790                }
1791        }
1792
1793        return BFS_RNOMATCH;
1794}
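
/*
 * An illustration of the Step 2 filtering above (example values assumed):
 * if the dependency into @lock->parent was only -(*R)-> (i.e.
 * parent->only_xr is true) and @lock->dep == DEP_SR_MASK | DEP_EN_MASK,
 * then DEP_SR_MASK is masked out and DEP_EN_MASK remains, so @lock is still
 * reachable through a strong path; and since a -(*N)-> dependency
 * (DEP_EN_MASK) is left, @lock->only_xr is set to false for the next step.
 */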
1795
1796static inline enum bfs_result
1797__bfs_forwards(struct lock_list *src_entry,
1798               void *data,
1799               bool (*match)(struct lock_list *entry, void *data),
1800               bool (*skip)(struct lock_list *entry, void *data),
1801               struct lock_list **target_entry)
1802{
1803        return __bfs(src_entry, data, match, skip, target_entry,
1804                     offsetof(struct lock_class, locks_after));
1805
1806}
1807
1808static inline enum bfs_result
1809__bfs_backwards(struct lock_list *src_entry,
1810                void *data,
1811                bool (*match)(struct lock_list *entry, void *data),
1812                bool (*skip)(struct lock_list *entry, void *data),
1813                struct lock_list **target_entry)
1814{
1815        return __bfs(src_entry, data, match, skip, target_entry,
1816                     offsetof(struct lock_class, locks_before));
1817
1818}
1819
1820static void print_lock_trace(const struct lock_trace *trace,
1821                             unsigned int spaces)
1822{
1823        stack_trace_print(trace->entries, trace->nr_entries, spaces);
1824}
1825
1826/*
1827 * Print a dependency chain entry (this is only done when a deadlock
1828 * has been detected):
1829 */
1830static noinline void
1831print_circular_bug_entry(struct lock_list *target, int depth)
1832{
1833        if (debug_locks_silent)
1834                return;
1835        printk("\n-> #%u", depth);
1836        print_lock_name(target->class);
1837        printk(KERN_CONT ":\n");
1838        print_lock_trace(target->trace, 6);
1839}
1840
1841static void
1842print_circular_lock_scenario(struct held_lock *src,
1843                             struct held_lock *tgt,
1844                             struct lock_list *prt)
1845{
1846        struct lock_class *source = hlock_class(src);
1847        struct lock_class *target = hlock_class(tgt);
1848        struct lock_class *parent = prt->class;
1849
1850        /*
1851         * A direct locking problem where unsafe_class lock is taken
1852         * directly by safe_class lock, then all we need to show
1853         * is the deadlock scenario, as it is obvious that the
1854         * unsafe lock is taken under the safe lock.
1855         *
1856         * But if there is a chain instead, where the safe lock takes
1857         * an intermediate lock (middle_class) where this lock is
1858         * not the same as the safe lock, then the lock chain is
1859         * used to describe the problem. Otherwise we would need
1860         * to show a different CPU case for each link in the chain
1861         * from the safe_class lock to the unsafe_class lock.
1862         */
1863        if (parent != source) {
1864                printk("Chain exists of:\n  ");
1865                __print_lock_name(source);
1866                printk(KERN_CONT " --> ");
1867                __print_lock_name(parent);
1868                printk(KERN_CONT " --> ");
1869                __print_lock_name(target);
1870                printk(KERN_CONT "\n\n");
1871        }
1872
1873        printk(" Possible unsafe locking scenario:\n\n");
1874        printk("       CPU0                    CPU1\n");
1875        printk("       ----                    ----\n");
1876        printk("  lock(");
1877        __print_lock_name(target);
1878        printk(KERN_CONT ");\n");
1879        printk("                               lock(");
1880        __print_lock_name(parent);
1881        printk(KERN_CONT ");\n");
1882        printk("                               lock(");
1883        __print_lock_name(target);
1884        printk(KERN_CONT ");\n");
1885        printk("  lock(");
1886        __print_lock_name(source);
1887        printk(KERN_CONT ");\n");
1888        printk("\n *** DEADLOCK ***\n\n");
1889}
1890
1891/*
1892 * When a circular dependency is detected, print the
1893 * header first:
1894 */
1895static noinline void
1896print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1897                        struct held_lock *check_src,
1898                        struct held_lock *check_tgt)
1899{
1900        struct task_struct *curr = current;
1901
1902        if (debug_locks_silent)
1903                return;
1904
1905        pr_warn("\n");
1906        pr_warn("======================================================\n");
1907        pr_warn("WARNING: possible circular locking dependency detected\n");
1908        print_kernel_ident();
1909        pr_warn("------------------------------------------------------\n");
1910        pr_warn("%s/%d is trying to acquire lock:\n",
1911                curr->comm, task_pid_nr(curr));
1912        print_lock(check_src);
1913
1914        pr_warn("\nbut task is already holding lock:\n");
1915
1916        print_lock(check_tgt);
1917        pr_warn("\nwhich lock already depends on the new lock.\n\n");
1918        pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1919
1920        print_circular_bug_entry(entry, depth);
1921}
1922
1923/*
1924 * We are about to add A -> B into the dependency graph, and in __bfs() a
1925 * strong dependency path A -> .. -> B is found: hlock_class equals
1926 * entry->class.
1927 *
1928 * If A -> .. -> B can replace A -> B in any __bfs() search (means the former
1929 * is _stronger_ than or equal to the latter), we consider A -> B as redundant.
1930 * For example if A -> .. -> B is -(EN)-> (i.e. A -(E*)-> .. -(*N)-> B), and A
1931 * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the
1932 * dependency graph, as any strong path ..-> A -> B ->.. we can get with
1933 * having dependency A -> B, we could already get a equivalent path ..-> A ->
1934 * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant.
1935 *
1936 * We need to make sure both the start and the end of A -> .. -> B are not
1937 * weaker than A -> B. For the start part, please see the comment in
1938 * check_redundant(). For the end part, we need:
1939 *
1940 * Either
1941 *
1942 *     a) A -> B is -(*R)-> (everything is not weaker than that)
1943 *
1944 * or
1945 *
1946 *     b) A -> .. -> B is -(*N)-> (nothing is stronger than this)
1947 *
1948 */
1949static inline bool hlock_equal(struct lock_list *entry, void *data)
1950{
1951        struct held_lock *hlock = (struct held_lock *)data;
1952
1953        return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
1954               (hlock->read == 2 ||  /* A -> B is -(*R)-> */
1955                !entry->only_xr); /* A -> .. -> B is -(*N)-> */
1956}
1957
1958/*
1959 * We are about to add B -> A into the dependency graph, and in __bfs() a
1960 * strong dependency path A -> .. -> B is found: hlock_class equals
1961 * entry->class.
1962 *
1963 * We will have a deadlock case (conflict) if A -> .. -> B -> A is a strong
1964 * dependency cycle, that means:
1965 *
1966 * Either
1967 *
1968 *     a) B -> A is -(E*)->
1969 *
1970 * or
1971 *
1972 *     b) A -> .. -> B is -(*N)-> (i.e. A -> .. -(*N)-> B)
1973 *
1974 * as then we don't have -(*R)-> -(S*)-> in the cycle.
1975 */
1976static inline bool hlock_conflict(struct lock_list *entry, void *data)
1977{
1978        struct held_lock *hlock = (struct held_lock *)data;
1979
1980        return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
1981               (hlock->read == 0 || /* B -> A is -(E*)-> */
1982                !entry->only_xr); /* A -> .. -> B is -(*N)-> */
1983}
1984
1985static noinline void print_circular_bug(struct lock_list *this,
1986                                struct lock_list *target,
1987                                struct held_lock *check_src,
1988                                struct held_lock *check_tgt)
1989{
1990        struct task_struct *curr = current;
1991        struct lock_list *parent;
1992        struct lock_list *first_parent;
1993        int depth;
1994
1995        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1996                return;
1997
1998        this->trace = save_trace();
1999        if (!this->trace)
2000                return;
2001
2002        depth = get_lock_depth(target);
2003
2004        print_circular_bug_header(target, depth, check_src, check_tgt);
2005
2006        parent = get_lock_parent(target);
2007        first_parent = parent;
2008
2009        while (parent) {
2010                print_circular_bug_entry(parent, --depth);
2011                parent = get_lock_parent(parent);
2012        }
2013
2014        printk("\nother info that might help us debug this:\n\n");
2015        print_circular_lock_scenario(check_src, check_tgt,
2016                                     first_parent);
2017
2018        lockdep_print_held_locks(curr);
2019
2020        printk("\nstack backtrace:\n");
2021        dump_stack();
2022}
2023
2024static noinline void print_bfs_bug(int ret)
2025{
2026        if (!debug_locks_off_graph_unlock())
2027                return;
2028
2029        /*
2030         * Breadth-first-search failed, graph got corrupted?
2031         */
2032        WARN(1, "lockdep bfs error:%d\n", ret);
2033}
2034
2035static bool noop_count(struct lock_list *entry, void *data)
2036{
2037        (*(unsigned long *)data)++;
2038        return false;
2039}
2040
2041static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
2042{
2043        unsigned long count = 0;
2044        struct lock_list *target_entry;
2045
2046        __bfs_forwards(this, (void *)&count, noop_count, NULL, &target_entry);
2047
2048        return count;
2049}

2050unsigned long lockdep_count_forward_deps(struct lock_class *class)
2051{
2052        unsigned long ret, flags;
2053        struct lock_list this;
2054
2055        __bfs_init_root(&this, class);
2056
2057        raw_local_irq_save(flags);
2058        lockdep_lock();
2059        ret = __lockdep_count_forward_deps(&this);
2060        lockdep_unlock();
2061        raw_local_irq_restore(flags);
2062
2063        return ret;
2064}
2065
2066static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
2067{
2068        unsigned long count = 0;
2069        struct lock_list *target_entry;
2070
2071        __bfs_backwards(this, (void *)&count, noop_count, NULL, &target_entry);
2072
2073        return count;
2074}
2075
2076unsigned long lockdep_count_backward_deps(struct lock_class *class)
2077{
2078        unsigned long ret, flags;
2079        struct lock_list this;
2080
2081        __bfs_init_root(&this, class);
2082
2083        raw_local_irq_save(flags);
2084        lockdep_lock();
2085        ret = __lockdep_count_backward_deps(&this);
2086        lockdep_unlock();
2087        raw_local_irq_restore(flags);
2088
2089        return ret;
2090}
2091
2092/*
2093 * Check that the dependency graph starting at <src> can lead to
2094 * <target> or not.
2095 */
2096static noinline enum bfs_result
2097check_path(struct held_lock *target, struct lock_list *src_entry,
2098           bool (*match)(struct lock_list *entry, void *data),
2099           bool (*skip)(struct lock_list *entry, void *data),
2100           struct lock_list **target_entry)
2101{
2102        enum bfs_result ret;
2103
2104        ret = __bfs_forwards(src_entry, target, match, skip, target_entry);
2105
2106        if (unlikely(bfs_error(ret)))
2107                print_bfs_bug(ret);
2108
2109        return ret;
2110}
2111
2112/*
2113 * Prove that the dependency graph starting at <src> can not
2114 * lead to <target>. If it can, there is a circle when adding
2115 * <target> -> <src> dependency.
2116 *
2117 * Print an error and return BFS_RMATCH if it does.
2118 */
2119static noinline enum bfs_result
2120check_noncircular(struct held_lock *src, struct held_lock *target,
2121                  struct lock_trace **const trace)
2122{
2123        enum bfs_result ret;
2124        struct lock_list *target_entry;
2125        struct lock_list src_entry;
2126
2127        bfs_init_root(&src_entry, src);
2128
2129        debug_atomic_inc(nr_cyclic_checks);
2130
2131        ret = check_path(target, &src_entry, hlock_conflict, NULL, &target_entry);
2132
2133        if (unlikely(ret == BFS_RMATCH)) {
2134                if (!*trace) {
2135                        /*
2136                         * If save_trace fails here, the printing might
2137                         * trigger a WARN but because of the !nr_entries it
2138                         * should not do bad things.
2139                         */
2140                        *trace = save_trace();
2141                }
2142
2143                print_circular_bug(&src_entry, target_entry, src, target);
2144        }
2145
2146        return ret;
2147}
2148
2149#ifdef CONFIG_TRACE_IRQFLAGS
2150
2151/*
2152 * Forwards and backwards subgraph searching, for the purposes of
2153 * proving that two subgraphs can be connected by a new dependency
2154 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
2155 *
2156 * An irq safe->unsafe deadlock happens with the following conditions:
2157 *
2158 * 1) We have a strong dependency path A -> ... -> B
2159 *
2160 * 2) and we have ENABLED_IRQ usage of B and USED_IN_IRQ usage of A, therefore
2161 *    irq can create a new dependency B -> A (consider the case that a holder
2162 *    of B gets interrupted by an irq whose handler will try to acquire A).
2163 *
2164 * 3) the dependency circle A -> ... -> B -> A we get from 1) and 2) is a
2165 *    strong circle:
2166 *
2167 *      For the usage bits of B:
2168 *        a) if A -> B is -(*N)->, then B -> A could be any type, so any
2169 *           ENABLED_IRQ usage suffices.
2170 *        b) if A -> B is -(*R)->, then B -> A must be -(E*)->, so only
2171 *           non-read ENABLED_IRQ usage suffices.
2172 *
2173 *      For the usage bits of A:
2174 *        c) if A -> B is -(E*)->, then B -> A could be any type, so any
2175 *           USED_IN_IRQ usage suffices.
2176 *        d) if A -> B is -(S*)->, then B -> A must be -(*N)->, so only
2177 *           non-read USED_IN_IRQ usage suffices.
2178 */
2179
2180/*
2181 * There is a strong dependency path in the dependency graph: A -> B, and now
2182 * we need to decide which usage bit of A should be accumulated to detect
2183 * safe->unsafe bugs.
2184 *
2185 * Note that usage_accumulate() is used in backwards search, so ->only_xr
2186 * stands for whether A -> B only has -(S*)-> (in this case ->only_xr is true).
2187 *
2188 * As above, if only_xr is false, which means A -> B has -(E*)-> dependency
2189 * path, any usage of A should be considered. Otherwise, we should only
2190 * consider the non-read usages of A (the _READ bits are masked out).
2191 */
2192static inline bool usage_accumulate(struct lock_list *entry, void *mask)
2193{
2194        if (!entry->only_xr)
2195                *(unsigned long *)mask |= entry->class->usage_mask;
2196        else /* Mask out _READ usage bits */
2197                *(unsigned long *)mask |= (entry->class->usage_mask & LOCKF_IRQ);
2198
2199        return false;
2200}
2201
2202/*
2203 * There is a strong dependency path in the dependency graph: A -> B, and now
2204 * we need to decide which usage bit of B conflicts with the usage bits of A,
2205 * i.e. which usage bit of B may introduce safe->unsafe deadlocks.
2206 *
2207 * As above, if only_xr is false, which means A -> B has -(*N)-> dependency
2208 * path, any usage of B should be considered. Otherwise, we should only
2209 * consider the non-read usages of B (the _READ bits are masked out).
2210 */
2211static inline bool usage_match(struct lock_list *entry, void *mask)
2212{
2213        if (!entry->only_xr)
2214                return !!(entry->class->usage_mask & *(unsigned long *)mask);
2215        else /* Mask out _READ usage bits */
2216                return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask);
2217}
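
/*
 * An example of the ->only_xr filtering above (values assumed for
 * illustration): suppose B's only IRQ usage is LOCK_ENABLED_HARDIRQ_READ
 * and the forward search reached B through a -(*R)->-only path
 * (entry->only_xr == true). Masking with LOCKF_IRQ drops the _READ bit, so
 * usage_match() returns false, which is the desired result: a read-held B
 * gives a -(S*)-> edge B -> A, and -(*R)-> followed by -(S*)-> does not
 * form a strong cycle.
 */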
2218
2219static inline bool usage_skip(struct lock_list *entry, void *mask)
2220{
2221        /*
2222         * Skip local_lock() for irq inversion detection.
2223         *
2224         * For !RT, local_lock() is not a real lock, so it won't carry any
2225         * dependency.
2226         *
2227         * For RT, an irq inversion happens when we have lock A and B, and on
2228         * some CPU we can have:
2229         *
2230         *      lock(A);
2231         *      <interrupted>
2232         *        lock(B);
2233         *
2234         * where lock(B) cannot sleep, and we have a dependency B -> ... -> A.
2235         *
2236         * Now we prove local_lock() cannot exist in that dependency. First we
2237         * have the observation for any lock chain L1 -> ... -> Ln, for any
2238         * 1 <= i <= n, Li.inner_wait_type <= L1.inner_wait_type, otherwise
2239         * wait context check will complain. And since B is not a sleep lock,
2240         * therefore B.inner_wait_type <= 2, and since the inner_wait_type of
2241         * local_lock() is 3, which is greater than 2, therefore there is no
2242         * way the local_lock() exists in the dependency B -> ... -> A.
2243         *
2244         * As a result, we will skip local_lock(), when we search for irq
2245         * inversion bugs.
2246         */
2247        if (entry->class->lock_type == LD_LOCK_PERCPU) {
2248                if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
2249                        return false;
2250
2251                return true;
2252        }
2253
2254        return false;
2255}
2256
2257/*
2258 * Find a node in the forwards-direction dependency sub-graph starting
2259 * at @root->class whose usage mask matches @usage_mask.
2260 *
2261 * Return BFS_RMATCH if such a node exists in the subgraph, and put that node
2262 * into *@target_entry.
2263 */
2264static enum bfs_result
2265find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
2266                        struct lock_list **target_entry)
2267{
2268        enum bfs_result result;
2269
2270        debug_atomic_inc(nr_find_usage_forwards_checks);
2271
2272        result = __bfs_forwards(root, &usage_mask, usage_match, usage_skip, target_entry);
2273
2274        return result;
2275}
2276
2277/*
2278 * Find a node in the backwards-direction dependency sub-graph starting
2279 * at @root->class whose usage mask matches @usage_mask.
2280 */
2281static enum bfs_result
2282find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
2283                        struct lock_list **target_entry)
2284{
2285        enum bfs_result result;
2286
2287        debug_atomic_inc(nr_find_usage_backwards_checks);
2288
2289        result = __bfs_backwards(root, &usage_mask, usage_match, usage_skip, target_entry);
2290
2291        return result;
2292}
2293
2294static void print_lock_class_header(struct lock_class *class, int depth)
2295{
2296        int bit;
2297
2298        printk("%*s->", depth, "");
2299        print_lock_name(class);
2300#ifdef CONFIG_DEBUG_LOCKDEP
2301        printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
2302#endif
2303        printk(KERN_CONT " {\n");
2304
2305        for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {
2306                if (class->usage_mask & (1 << bit)) {
2307                        int len = depth;
2308
2309                        len += printk("%*s   %s", depth, "", usage_str[bit]);
2310                        len += printk(KERN_CONT " at:\n");
2311                        print_lock_trace(class->usage_traces[bit], len);
2312                }
2313        }
2314        printk("%*s }\n", depth, "");
2315
2316        printk("%*s ... key      at: [<%px>] %pS\n",
2317                depth, "", class->key, class->key);
2318}
2319
2320/*
2321 * Dependency path printing:
2322 *
2323 * After BFS we get a lock dependency path (linked via ->parent of lock_list),
2324 * printing out each lock in the dependency path will help on understanding how
2325 * the deadlock could happen. Here are some details about dependency path
2326 * printing:
2327 *
2328 * 1)   A lock_list can be either forwards or backwards for a lock dependency.
2329 *      For a lock dependency A -> B, there are two lock_lists:
2330 *
2331 *      a)      lock_list in the ->locks_after list of A, whose ->class is B and
2332 *              ->links_to is A. In this case, we can say the lock_list is
2333 *              "A -> B" (forwards case).
2334 *
2335 *      b)      lock_list in the ->locks_before list of B, whose ->class is A
2336 *              and ->links_to is B. In this case, we can say the lock_list is
2337 *              "B <- A" (backwards case).
2338 *
2339 *      The ->trace of both a) and b) point to the call trace where B was
2340 *      acquired with A held.
2341 *
2342 * 2)   A "helper" lock_list is introduced during BFS, this lock_list doesn't
2343 *      represent a certain lock dependency, it only provides an initial entry
2344 *      for BFS. For example, BFS may introduce a "helper" lock_list whose
2345 *      ->class is A, as a result BFS will search all dependencies starting with
2346 *      A, e.g. A -> B or A -> C.
2347 *
2348 *      The notation of a forwards helper lock_list is like "-> A", which means
2349 *      we should search the forwards dependencies starting with "A", e.g A -> B
2350 *      or A -> C.
2351 *
2352 *      The notation of a backwards helper lock_list is like "<- B", which means
2353 *      we should search the backwards dependencies ending with "B", e.g.
2354 *      B <- A or B <- C.
2355 */
2356
2357/*
2358 * printk the shortest lock dependencies from @root to @leaf in reverse order.
2359 *
2360 * We have a lock dependency path as follows:
2361 *
2362 *    @root                                                                 @leaf
2363 *      |                                                                     |
2364 *      V                                                                     V
2365 *                ->parent                                   ->parent
2366 * | lock_list | <--------- | lock_list | ... | lock_list  | <--------- | lock_list |
2367 * |    -> L1  |            | L1 -> L2  | ... |Ln-2 -> Ln-1|            | Ln-1 -> Ln|
2368 *
2369 * , so it's natural that we start from @leaf and print every ->class and
2370 * ->trace until we reach the @root.
2371 */
2372static void __used
2373print_shortest_lock_dependencies(struct lock_list *leaf,
2374                                 struct lock_list *root)
2375{
2376        struct lock_list *entry = leaf;
2377        int depth;
2378
2379        /* compute depth from the tree generated by BFS */
2380        depth = get_lock_depth(leaf);
2381
2382        do {
2383                print_lock_class_header(entry->class, depth);
2384                printk("%*s ... acquired at:\n", depth, "");
2385                print_lock_trace(entry->trace, 2);
2386                printk("\n");
2387
2388                if (depth == 0 && (entry != root)) {
2389                        printk("lockdep:%s bad path found in chain graph\n", __func__);
2390                        break;
2391                }
2392
2393                entry = get_lock_parent(entry);
2394                depth--;
2395        } while (entry && (depth >= 0));
2396}
2397
2398/*
2399 * printk the shortest lock dependencies from @leaf to @root.
2400 *
2401 * We have a lock dependency path (from a backwards search) as follows:
2402 *
2403 *    @leaf                                                                 @root
2404 *      |                                                                     |
2405 *      V                                                                     V
2406 *                ->parent                                   ->parent
2407 * | lock_list | ---------> | lock_list | ... | lock_list  | ---------> | lock_list |
2408 * | L2 <- L1  |            | L3 <- L2  | ... | Ln <- Ln-1 |            |    <- Ln  |
2409 *
2410 * , so when we iterate from @leaf to @root, we actually print the lock
2411 * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order.
2412 *
2413 * Another thing to notice here is that the ->class of L2 <- L1 is L1, while
2414 * the ->trace of L2 <- L1 is the call trace of L2. In fact, we don't have the
2415 * call trace of L1 in the dependency path, which is alright, because most of
2416 * the time we can figure out where L1 is held from the call trace of L2.
2417 */
2418static void __used
2419print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
2420                                           struct lock_list *root)
2421{
2422        struct lock_list *entry = leaf;
2423        const struct lock_trace *trace = NULL;
2424        int depth;
2425
2426        /* compute depth from the tree generated by BFS */
2427        depth = get_lock_depth(leaf);
2428
2429        do {
2430                print_lock_class_header(entry->class, depth);
2431                if (trace) {
2432                        printk("%*s ... acquired at:\n", depth, "");
2433                        print_lock_trace(trace, 2);
2434                        printk("\n");
2435                }
2436
2437                /*
2438                 * Record the pointer to the trace for the next lock_list
2439                 * entry, see the comments for the function.
2440                 */
2441                trace = entry->trace;
2442
2443                if (depth == 0 && (entry != root)) {
2444                        printk("lockdep:%s bad path found in chain graph\n", __func__);
2445                        break;
2446                }
2447
2448                entry = get_lock_parent(entry);
2449                depth--;
2450        } while (entry && (depth >= 0));
2451}
2452
2453static void
2454print_irq_lock_scenario(struct lock_list *safe_entry,
2455                        struct lock_list *unsafe_entry,
2456                        struct lock_class *prev_class,
2457                        struct lock_class *next_class)
2458{
2459        struct lock_class *safe_class = safe_entry->class;
2460        struct lock_class *unsafe_class = unsafe_entry->class;
2461        struct lock_class *middle_class = prev_class;
2462
2463        if (middle_class == safe_class)
2464                middle_class = next_class;
2465
2466        /*
2467         * A direct locking problem where unsafe_class lock is taken
2468         * directly by safe_class lock, then all we need to show
2469         * is the deadlock scenario, as it is obvious that the
2470         * unsafe lock is taken under the safe lock.
2471         *
2472         * But if there is a chain instead, where the safe lock takes
2473         * an intermediate lock (middle_class) where this lock is
2474         * not the same as the safe lock, then the lock chain is
2475         * used to describe the problem. Otherwise we would need
2476         * to show a different CPU case for each link in the chain
2477         * from the safe_class lock to the unsafe_class lock.
2478         */
2479        if (middle_class != unsafe_class) {
2480                printk("Chain exists of:\n  ");
2481                __print_lock_name(safe_class);
2482                printk(KERN_CONT " --> ");
2483                __print_lock_name(middle_class);
2484                printk(KERN_CONT " --> ");
2485                __print_lock_name(unsafe_class);
2486                printk(KERN_CONT "\n\n");
2487        }
2488
2489        printk(" Possible interrupt unsafe locking scenario:\n\n");
2490        printk("       CPU0                    CPU1\n");
2491        printk("       ----                    ----\n");
2492        printk("  lock(");
2493        __print_lock_name(unsafe_class);
2494        printk(KERN_CONT ");\n");
2495        printk("                               local_irq_disable();\n");
2496        printk("                               lock(");
2497        __print_lock_name(safe_class);
2498        printk(KERN_CONT ");\n");
2499        printk("                               lock(");
2500        __print_lock_name(middle_class);
2501        printk(KERN_CONT ");\n");
2502        printk("  <Interrupt>\n");
2503        printk("    lock(");
2504        __print_lock_name(safe_class);
2505        printk(KERN_CONT ");\n");
2506        printk("\n *** DEADLOCK ***\n\n");
2507}
2508
2509static void
2510print_bad_irq_dependency(struct task_struct *curr,
2511                         struct lock_list *prev_root,
2512                         struct lock_list *next_root,
2513                         struct lock_list *backwards_entry,
2514                         struct lock_list *forwards_entry,
2515                         struct held_lock *prev,
2516                         struct held_lock *next,
2517                         enum lock_usage_bit bit1,
2518                         enum lock_usage_bit bit2,
2519                         const char *irqclass)
2520{
2521        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2522                return;
2523
2524        pr_warn("\n");
2525        pr_warn("=====================================================\n");
2526        pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
2527                irqclass, irqclass);
2528        print_kernel_ident();
2529        pr_warn("-----------------------------------------------------\n");
2530        pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
2531                curr->comm, task_pid_nr(curr),
2532                lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
2533                curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
2534                lockdep_hardirqs_enabled(),
2535                curr->softirqs_enabled);
2536        print_lock(next);
2537
2538        pr_warn("\nand this task is already holding:\n");
2539        print_lock(prev);
2540        pr_warn("which would create a new lock dependency:\n");
2541        print_lock_name(hlock_class(prev));
2542        pr_cont(" ->");
2543        print_lock_name(hlock_class(next));
2544        pr_cont("\n");
2545
2546        pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
2547                irqclass);
2548        print_lock_name(backwards_entry->class);
2549        pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
2550
2551        print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
2552
2553        pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
2554        print_lock_name(forwards_entry->class);
2555        pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
2556        pr_warn("...");
2557
2558        print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);
2559
2560        pr_warn("\nother info that might help us debug this:\n\n");
2561        print_irq_lock_scenario(backwards_entry, forwards_entry,
2562                                hlock_class(prev), hlock_class(next));
2563
2564        lockdep_print_held_locks(curr);
2565
2566        pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
2567        print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
2568
2569        pr_warn("\nthe dependencies between the lock to be acquired");
2570        pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
2571        next_root->trace = save_trace();
2572        if (!next_root->trace)
2573                return;
2574        print_shortest_lock_dependencies(forwards_entry, next_root);
2575
2576        pr_warn("\nstack backtrace:\n");
2577        dump_stack();
2578}
2579
2580static const char *state_names[] = {
2581#define LOCKDEP_STATE(__STATE) \
2582        __stringify(__STATE),
2583#include "lockdep_states.h"
2584#undef LOCKDEP_STATE
2585};
2586
2587static const char *state_rnames[] = {
2588#define LOCKDEP_STATE(__STATE) \
2589        __stringify(__STATE)"-READ",
2590#include "lockdep_states.h"
2591#undef LOCKDEP_STATE
2592};
2593
2594static inline const char *state_name(enum lock_usage_bit bit)
2595{
2596        if (bit & LOCK_USAGE_READ_MASK)
2597                return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
2598        else
2599                return state_names[bit >> LOCK_USAGE_DIR_MASK];
2600}
2601
2602/*
2603 * The bit number is encoded like:
2604 *
2605 *  bit0: 0 exclusive, 1 read lock
2606 *  bit1: 0 used in irq, 1 irq enabled
2607 *  bit2-n: state
2608 */
2609static int exclusive_bit(int new_bit)
2610{
2611        int state = new_bit & LOCK_USAGE_STATE_MASK;
2612        int dir = new_bit & LOCK_USAGE_DIR_MASK;
2613
2614        /*
2615         * keep state, bit flip the direction and strip read.
2616         */
2617        return state | (dir ^ LOCK_USAGE_DIR_MASK);
2618}
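
/*
 * For instance (values follow the encoding above): new_bit ==
 * LOCK_USED_IN_HARDIRQ_READ has bitnr0 == 1 (read) and bitnr1 == 0 (used in
 * irq), so exclusive_bit() returns state | (0 ^ LOCK_USAGE_DIR_MASK) ==
 * LOCK_ENABLED_HARDIRQ: the direction is flipped and the read bit is
 * stripped.
 */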
2619
2620/*
2621 * Observe that when given a bitmask where each bitnr is encoded as above, a
2622 * right shift of the mask decrements each individual bitnr by 1 and,
2623 * conversely, a left shift increments each individual bitnr by 1.
2624 *
2625 * So for all bits whose numbers have LOCK_ENABLED_* set (bitnr1 == 1), we can
2626 * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
2627 * instead, by subtracting 2 from the bit number, or shifting the mask right by 2.
2628 *
2629 * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
2630 *
2631 * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
2632 * all bits set) and recompose with bitnr1 flipped.
2633 */
2634static unsigned long invert_dir_mask(unsigned long mask)
2635{
2636        unsigned long excl = 0;
2637
2638        /* Invert dir */
2639        excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
2640        excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
2641
2642        return excl;
2643}
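
/*
 * E.g. (illustrative): a mask containing only LOCKF_ENABLED_HARDIRQ
 * (bitnr1 == 1) becomes LOCKF_USED_IN_HARDIRQ (bitnr1 == 0) via the right
 * shift, and vice versa via the left shift; the read bits (bitnr0) are left
 * untouched at this stage.
 */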
2644
2645/*
2646 * Note that a LOCK_ENABLED_IRQ_*_READ usage and a LOCK_USED_IN_IRQ_*_READ
2647 * usage may cause deadlock too, for example:
2648 *
2649 * P1                           P2
2650 * <irq disabled>
2651 * write_lock(l1);              <irq enabled>
2652 *                              read_lock(l2);
2653 * write_lock(l2);
2654 *                              <in irq>
2655 *                              read_lock(l1);
2656 *
2657 * , in the above case, l1 will be marked as LOCK_USED_IN_HARDIRQ_READ and
2658 * l2 will be marked as LOCK_ENABLED_HARDIRQ_READ, and this is a possible
2659 * deadlock.
2660 *
2661 * In fact, all of the following cases may cause deadlocks:
2662 *
2663 *       LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*
2664 *       LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*
2665 *       LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*_READ
2666 *       LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*_READ
2667 *
2668 * As a result, to calculate the "exclusive mask", first we invert the
2669 * direction (USED_IN/ENABLED) of the original mask, and 1) for all bits with
2670 * bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*). 2) for all
2671 * bits with bitnr0 cleared (LOCK_*_READ), add those with bitnr0 set (LOCK_*).
2672 */
2673static unsigned long exclusive_mask(unsigned long mask)
2674{
2675        unsigned long excl = invert_dir_mask(mask);
2676
2677        excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2678        excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2679
2680        return excl;
2681}
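
/*
 * Working through the example above (illustrative): for mask ==
 * LOCKF_USED_IN_HARDIRQ_READ, invert_dir_mask() yields
 * LOCKF_ENABLED_HARDIRQ_READ, and the two |= steps then add
 * LOCKF_ENABLED_HARDIRQ, so the result is
 * LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_HARDIRQ_READ, i.e. exactly the two
 * usages that the list above marks as conflicting with
 * LOCK_USED_IN_IRQ_*_READ.
 */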
2682
2683/*
2684 * Retrieve the _possible_ original mask to which @mask is
2685 * exclusive. Ie: this is the opposite of exclusive_mask().
2686 * Note that 2 possible original bits can match an exclusive
2687 * bit: one has LOCK_USAGE_READ_MASK set, the other has it
2688 * cleared. So both are returned for each exclusive bit.
2689 */
2690static unsigned long original_mask(unsigned long mask)
2691{
2692        unsigned long excl = invert_dir_mask(mask);
2693
2694        /* Include read in existing usages */
2695        excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2696        excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2697
2698        return excl;
2699}
2700
2701/*
2702 * Find the first pair of bit match between an original
2703 * usage mask and an exclusive usage mask.
2704 */
2705static int find_exclusive_match(unsigned long mask,
2706                                unsigned long excl_mask,
2707                                enum lock_usage_bit *bitp,
2708                                enum lock_usage_bit *excl_bitp)
2709{
2710        int bit, excl, excl_read;
2711
2712        for_each_set_bit(bit, &mask, LOCK_USED) {
2713                /*
2714                 * exclusive_bit() strips the read bit, however,
2715                 * LOCK_ENABLED_IRQ_*_READ may cause deadlocks too, so we need
2716                 * to search excl | LOCK_USAGE_READ_MASK as well.
2717                 */
2718                excl = exclusive_bit(bit);
2719                excl_read = excl | LOCK_USAGE_READ_MASK;
2720                if (excl_mask & lock_flag(excl)) {
2721                        *bitp = bit;
2722                        *excl_bitp = excl;
2723                        return 0;
2724                } else if (excl_mask & lock_flag(excl_read)) {
2725                        *bitp = bit;
2726                        *excl_bitp = excl_read;
2727                        return 0;
2728                }
2729        }
2730        return -1;
2731}
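
/*
 * For example (values assumed for illustration): with mask ==
 * LOCKF_USED_IN_HARDIRQ and excl_mask containing only
 * LOCKF_ENABLED_HARDIRQ_READ, the lock_flag(excl) test fails but the
 * lock_flag(excl_read) test matches, so *bitp is set to
 * LOCK_USED_IN_HARDIRQ and *excl_bitp to LOCK_ENABLED_HARDIRQ_READ.
 */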
2732
2733/*
2734 * Prove that the new dependency does not connect a hardirq-safe(-read)
2735 * lock with a hardirq-unsafe lock - to achieve this we search
2736 * the backwards-subgraph starting at <prev>, and the
2737 * forwards-subgraph starting at <next>:
2738 */
2739static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
2740                           struct held_lock *next)
2741{
2742        unsigned long usage_mask = 0, forward_mask, backward_mask;
2743        enum lock_usage_bit forward_bit = 0, backward_bit = 0;
2744        struct lock_list *target_entry1;
2745        struct lock_list *target_entry;
2746        struct lock_list this, that;
2747        enum bfs_result ret;
2748
2749        /*
2750         * Step 1: gather all hard/soft IRQs usages backward in an
2751         * accumulated usage mask.
2752         */
2753        bfs_init_rootb(&this, prev);
2754
2755        ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, usage_skip, NULL);
2756        if (bfs_error(ret)) {
2757                print_bfs_bug(ret);
2758                return 0;
2759        }
2760
2761        usage_mask &= LOCKF_USED_IN_IRQ_ALL;
2762        if (!usage_mask)
2763                return 1;
2764
2765        /*
2766         * Step 2: find exclusive uses forward that match the previous
2767         * backward accumulated mask.
2768         */
2769        forward_mask = exclusive_mask(usage_mask);
2770
2771        bfs_init_root(&that, next);
2772
2773        ret = find_usage_forwards(&that, forward_mask, &target_entry1);
2774        if (bfs_error(ret)) {
2775                print_bfs_bug(ret);
2776                return 0;
2777        }
2778        if (ret == BFS_RNOMATCH)
2779                return 1;
2780
2781        /*
2782         * Step 3: we found a bad match! Now retrieve a lock from the backward
2783         * list whose usage mask matches the exclusive usage mask from the
2784         * lock found on the forward list.
2785         *
2786         * Note, we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering
2787         * the following case:
2788         *
2789         * When trying to add A -> B to the graph, we find that there is a
2790         * hardirq-safe L such that L -> ... -> A, and another hardirq-unsafe M
2791         * such that B -> ... -> M. However M is **softirq-safe**. If we used the
2792         * exact inverted bits of M's usage_mask, we would find another lock N
2793         * that is **softirq-unsafe** and N -> ... -> A; however N -> .. -> M
2794         * will not cause an inversion deadlock.
2795         */
2796        backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
2797
2798        ret = find_usage_backwards(&this, backward_mask, &target_entry);
2799        if (bfs_error(ret)) {
2800                print_bfs_bug(ret);
2801                return 0;
2802        }
2803        if (DEBUG_LOCKS_WARN_ON(ret == BFS_RNOMATCH))
2804                return 1;
2805
2806        /*
2807         * Step 4: narrow down to a pair of incompatible usage bits
2808         * and report it.
2809         */
2810        ret = find_exclusive_match(target_entry->class->usage_mask,
2811                                   target_entry1->class->usage_mask,
2812                                   &backward_bit, &forward_bit);
2813        if (DEBUG_LOCKS_WARN_ON(ret == -1))
2814                return 1;
2815
2816        print_bad_irq_dependency(curr, &this, &that,
2817                                 target_entry, target_entry1,
2818                                 prev, next,
2819                                 backward_bit, forward_bit,
2820                                 state_name(backward_bit));
2821
2822        return 0;
2823}
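
/*
 * Tying the four steps above to the earlier P1/P2 example (illustrative):
 * when P1 calls write_lock(l2) while holding l1, prev == l1 and next == l2.
 * Step 1 accumulates LOCK_USED_IN_HARDIRQ_READ from l1 (taken via
 * read_lock() in a hardirq), step 2 searches forwards from l2 with
 * forward_mask == LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_HARDIRQ_READ and
 * matches l2 itself (marked LOCK_ENABLED_HARDIRQ_READ), and steps 3 and 4
 * narrow this down to the usage-bit pair that print_bad_irq_dependency()
 * reports.
 */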
2824
2825#else
2826
2827static inline int check_irq_usage(struct task_struct *curr,
2828                                  struct held_lock *prev, struct held_lock *next)
2829{
2830        return 1;
2831}
2832
2833static inline bool usage_skip(struct lock_list *entry, void *mask)
2834{
2835        return false;
2836}
2837
2838#endif /* CONFIG_TRACE_IRQFLAGS */
2839
2840#ifdef CONFIG_LOCKDEP_SMALL
2841/*
2842 * Check that the dependency graph starting at <src> can lead to
2843 * <target> or not. If it can, <src> -> <target> dependency is already
2844 * in the graph.
2845 *
2846 * Return BFS_RMATCH if it does, BFS_RNOMATCH if it does not, or BFS_E* if
2847 * any error occurs during the bfs search.
2848 */
2849static noinline enum bfs_result
2850check_redundant(struct held_lock *src, struct held_lock *target)
2851{
2852        enum bfs_result ret;
2853        struct lock_list *target_entry;
2854        struct lock_list src_entry;
2855
2856        bfs_init_root(&src_entry, src);
2857        /*
2858         * Special setup for check_redundant().
2859         *
2860         * To report redundant, we need to find a strong dependency path that
2861         * is equal to or stronger than <src> -> <target>. So if <src> is E,
2862         * we need to let __bfs() only search for a path starting at a -(E*)->,
2863         * we need to let __bfs() only search for paths starting with -(E*)->;
2864         * we achieve this by setting the initial node's ->only_xr to true in
2865         * that case. And if <src> is S, we set the initial ->only_xr to false
2866         */
2867        src_entry.only_xr = src->read == 0;
2868
2869        debug_atomic_inc(nr_redundant_checks);
2870
2871        /*
2872         * Note: we skip local_lock() for redundant check, because as the
2873         * comment in usage_skip(), A -> local_lock() -> B and A -> B are not
2874         * the same.
2875         */
2876        ret = check_path(target, &src_entry, hlock_equal, usage_skip, &target_entry);
2877
2878        if (ret == BFS_RMATCH)
2879                debug_atomic_inc(nr_redundant);
2880
2881        return ret;
2882}
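
/*
 * Example (illustrative): if A -(EN)-> .. -(EN)-> B is already in the graph
 * and we are about to add A -(ER)-> B, the search starts with -(E*)->
 * (->only_xr is true because src->read == 0) and hlock_equal() matches at B
 * because the new dependency is -(*R)->, so BFS_RMATCH is returned and the
 * redundant A -> B edge is not added.
 */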
2883
2884#else
2885
2886static inline enum bfs_result
2887check_redundant(struct held_lock *src, struct held_lock *target)
2888{
2889        return BFS_RNOMATCH;
2890}
2891
2892#endif
2893
2894static void inc_chains(int irq_context)
2895{
2896        if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2897                nr_hardirq_chains++;
2898        else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2899                nr_softirq_chains++;
2900        else
2901                nr_process_chains++;
2902}
2903
2904static void dec_chains(int irq_context)
2905{
2906        if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2907                nr_hardirq_chains--;
2908        else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2909                nr_softirq_chains--;
2910        else
2911                nr_process_chains--;
2912}
2913
2914static void
2915print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
2916{
2917        struct lock_class *next = hlock_class(nxt);
2918        struct lock_class *prev = hlock_class(prv);
2919
2920        printk(" Possible unsafe locking scenario:\n\n");
2921        printk("       CPU0\n");
2922        printk("       ----\n");
2923        printk("  lock(");
2924        __print_lock_name(prev);
2925        printk(KERN_CONT ");\n");
2926        printk("  lock(");
2927        __print_lock_name(next);
2928        printk(KERN_CONT ");\n");
2929        printk("\n *** DEADLOCK ***\n\n");
2930        printk(" May be due to missing lock nesting notation\n\n");
2931}
2932
2933static void
2934print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
2935                   struct held_lock *next)
2936{
2937        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2938                return;
2939
2940        pr_warn("\n");
2941        pr_warn("============================================\n");
2942        pr_warn("WARNING: possible recursive locking detected\n");
2943        print_kernel_ident();
2944        pr_warn("--------------------------------------------\n");
2945        pr_warn("%s/%d is trying to acquire lock:\n",
2946                curr->comm, task_pid_nr(curr));
2947        print_lock(next);
2948        pr_warn("\nbut task is already holding lock:\n");
2949        print_lock(prev);
2950
2951        pr_warn("\nother info that might help us debug this:\n");
2952        print_deadlock_scenario(next, prev);
2953        lockdep_print_held_locks(curr);
2954
2955        pr_warn("\nstack backtrace:\n");
2956        dump_stack();
2957}
2958
2959/*
2960 * Check whether we are holding such a class already.
2961 *
2962 * (Note that this has to be done separately, because the graph cannot
2963 * detect such classes of deadlocks.)
2964 *
2965 * Returns: 0 if a deadlock is detected, 1 if OK, 2 if another lock with the same
2966 * lock class is held but nest_lock is also held, i.e. we rely on the
2967 * nest_lock to avoid the deadlock.
2968 */
2969static int
2970check_deadlock(struct task_struct *curr, struct held_lock *next)
2971{
2972        struct held_lock *prev;
2973        struct held_lock *nest = NULL;
2974        int i;
2975
2976        for (i = 0; i < curr->lockdep_depth; i++) {
2977                prev = curr->held_locks + i;
2978
2979                if (prev->instance == next->nest_lock)
2980                        nest = prev;
2981
2982                if (hlock_class(prev) != hlock_class(next))
2983                        continue;
2984
2985                /*
2986                 * Allow read-after-read recursion of the same
2987                 * lock class (i.e. read_lock(lock)+read_lock(lock)):
2988                 */
2989                if ((next->read == 2) && prev->read)
2990                        continue;
2991
2992                /*
2993                 * We're holding the nest_lock, which serializes this lock's
2994                 * nesting behaviour.
2995                 */
2996                if (nest)
2997                        return 2;
2998
2999                print_deadlock_bug(curr, prev, next);
3000                return 0;
3001        }
3002        return 1;
3003}
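
/*
 * For example (illustrative): read_lock(A); read_lock(A); is fine here
 * when the second acquisition is a recursive reader (next->read == 2 and
 * prev->read is set), whereas write_lock(A); write_lock(A); on the same
 * class without a nest_lock reaches print_deadlock_bug() and returns 0.
 */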
3004
3005/*
3006 * There was a chain-cache miss, and we are about to add a new dependency
3007 * to a previous lock. We validate the following rules:
3008 *
3009 *  - would the adding of the <prev> -> <next> dependency create a
3010 *    circular dependency in the graph? [== circular deadlock]
3011 *
3012 *  - does the new prev->next dependency connect any hardirq-safe lock
3013 *    (in the full backwards-subgraph starting at <prev>) with any
3014 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
3015 *    <next>)? [== illegal lock inversion with hardirq contexts]
3016 *
3017 *  - does the new prev->next dependency connect any softirq-safe lock
3018 *    (in the full backwards-subgraph starting at <prev>) with any
3019 *    softirq-unsafe lock (in the full forwards-subgraph starting at
3020 *    <next>)? [== illegal lock inversion with softirq contexts]
3021 *
3022 * any of these scenarios could lead to a deadlock.
3023 *
3024 * Then if all the validations pass, we add the forwards and backwards
3025 * dependency.
3026 */
3027static int
3028check_prev_add(struct task_struct *curr, struct held_lock *prev,
3029               struct held_lock *next, u16 distance,
3030               struct lock_trace **const trace)
3031{
3032        struct lock_list *entry;
3033        enum bfs_result ret;
3034
3035        if (!hlock_class(prev)->key || !hlock_class(next)->key) {
3036                /*
3037                 * The warning statements below may trigger a use-after-free
3038                 * of the class name. It is better to trigger a use-after-free
3039                 * and to have the class name most of the time instead of not
3040                 * having the class name available.
3041                 */
3042                WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
3043                          "Detected use-after-free of lock class %px/%s\n",
3044                          hlock_class(prev),
3045                          hlock_class(prev)->name);
3046                WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
3047                          "Detected use-after-free of lock class %px/%s\n",
3048                          hlock_class(next),
3049                          hlock_class(next)->name);
3050                return 2;
3051        }
3052
3053        /*
3054         * Prove that the new <prev> -> <next> dependency would not
3055         * create a circular dependency in the graph. (We do this by
3056         * a breadth-first search into the graph starting at <next>,
3057         * and check whether we can reach <prev>.)
3058         *
3059         * The search is limited by the size of the circular queue (i.e.,
3060         * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
3061         * in the graph whose neighbours are to be checked.
3062         */
3063        ret = check_noncircular(next, prev, trace);
3064        if (unlikely(bfs_error(ret) || ret == BFS_RMATCH))
3065                return 0;
3066
3067        if (!check_irq_usage(curr, prev, next))
3068                return 0;
3069
3070        /*
3071         * Is the <prev> -> <next> dependency already present?
3072         *
3073         * (this may occur even though this is a new chain: consider
3074         *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
3075         *  chains - the second one will be new, but L1 already has
3076         *  L2 added to its dependency list, due to the first chain.)
3077         */
3078        list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
3079                if (entry->class == hlock_class(next)) {
3080                        if (distance == 1)
3081                                entry->distance = 1;
3082                        entry->dep |= calc_dep(prev, next);
3083
3084                        /*
3085                         * Also, update the reverse dependency in @next's
3086                         * ->locks_before list.
3087                         *
3088                         *  Here we reuse @entry as the cursor, which is fine
3089                         *  because we won't go to the next iteration of the
3090                         *  outer loop:
3091                         *
3092                         *  For normal cases, we return in the inner loop.
3093                         *
3094                         *  If we fail to return, we have inconsistency, i.e.
3095                         *  <prev>::locks_after contains <next> while
3096                         *  <next>::locks_before doesn't contain <prev>. In
3097                         *  that case, we return after the inner loop and
3098                         *  indicate that something is wrong.
3099                         */
3100                        list_for_each_entry(entry, &hlock_class(next)->locks_before, entry) {
3101                                if (entry->class == hlock_class(prev)) {
3102                                        if (distance == 1)
3103                                                entry->distance = 1;
3104                                        entry->dep |= calc_depb(prev, next);
3105                                        return 1;
3106                                }
3107                        }
3108
3109                        /* <prev> is not found in <next>::locks_before */
3110                        return 0;
3111                }
3112        }
3113
3114        /*
3115         * Is the <prev> -> <next> link redundant?
3116         */
3117        ret = check_redundant(prev, next);
3118        if (bfs_error(ret))
3119                return 0;
3120        else if (ret == BFS_RMATCH)
3121                return 2;
3122
3123        if (!*trace) {
3124                *trace = save_trace();
3125                if (!*trace)
3126                        return 0;
3127        }
3128
3129        /*
3130         * Ok, all validations passed, add the new lock
3131         * to the previous lock's dependency list:
3132         */
3133        ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
3134                               &hlock_class(prev)->locks_after,
3135                               next->acquire_ip, distance,
3136                               calc_dep(prev, next),
3137                               *trace);
3138
3139        if (!ret)
3140                return 0;
3141
3142        ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
3143                               &hlock_class(next)->locks_before,
3144                               next->acquire_ip, distance,
3145                               calc_depb(prev, next),
3146                               *trace);
3147        if (!ret)
3148                return 0;
3149
3150        return 2;
3151}
3152
3153/*
3154 * Add the dependency to all directly-previous locks that are 'relevant'.
3155 * The ones that are relevant are (in increasing distance from curr):
3156 * all consecutive trylock entries and the final non-trylock entry - or
3157 * the end of this context's lock-chain - whichever comes first.
3158 */
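    /*
     * An illustrative walk (not from the original source): with a held-lock
     * stack of A, B, C(trylock) - bottom to top - and a new lock D, the loop
     * below first adds C -> D and keeps going because C is a trylock entry,
     * then adds B -> D and stops: B is the first non-trylock entry, and A is
     * already connected to D indirectly through B.
     */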
3159static int
3160check_prevs_add(struct task_struct *curr, struct held_lock *next)
3161{
3162        struct lock_trace *trace = NULL;
3163        int depth = curr->lockdep_depth;
3164        struct held_lock *hlock;
3165
3166        /*
3167         * Debugging checks.
3168         *
3169         * Depth must not be zero for a non-head lock:
3170         */
3171        if (!depth)
3172                goto out_bug;
3173        /*
3174         * At least two relevant locks must exist for this
3175         * to be a head:
3176         */
3177        if (curr->held_locks[depth].irq_context !=
3178                        curr->held_locks[depth-1].irq_context)
3179                goto out_bug;
3180
3181        for (;;) {
3182                u16 distance = curr->lockdep_depth - depth + 1;
3183                hlock = curr->held_locks + depth - 1;
3184
3185                if (hlock->check) {
3186                        int ret = check_prev_add(curr, hlock, next, distance, &trace);
3187                        if (!ret)
3188                                return 0;
3189
3190                        /*
3191                         * Stop after the first non-trylock entry,
3192                         * as non-trylock entries have added their
3193                         * own direct dependencies already, so this
3194                         * lock is connected to them indirectly:
3195                         */
3196                        if (!hlock->trylock)
3197                                break;
3198                }
3199
3200                depth--;
3201                /*
3202                 * End of lock-stack?
3203                 */
3204                if (!depth)
3205                        break;
3206                /*
3207                 * Stop the search if we cross into another context:
3208                 */
3209                if (curr->held_locks[depth].irq_context !=
3210                                curr->held_locks[depth-1].irq_context)
3211                        break;
3212        }
3213        return 1;
3214out_bug:
3215        if (!debug_locks_off_graph_unlock())
3216                return 0;
3217
3218        /*
3219         * Clearly we shouldn't be here, but since we made it we
3220         * can reliably say we messed up our state. See the above two
3221         * gotos for reasons why we could possibly end up here.
3222         */
3223        WARN_ON(1);
3224
3225        return 0;
3226}
3227
3228struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
3229static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
3230static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
3231unsigned long nr_zapped_lock_chains;
3232unsigned int nr_free_chain_hlocks;      /* Free chain_hlocks in buckets */
3233unsigned int nr_lost_chain_hlocks;      /* Lost chain_hlocks */
3234unsigned int nr_large_chain_blocks;     /* size > MAX_CHAIN_BUCKETS */
3235
3236/*
3237 * The first 2 chain_hlocks entries in the chain block in the bucket
3238 * list contain the following metadata:
3239 *
3240 *   entry[0]:
3241 *     Bit    15 - always set to 1 (it is not a class index)
3242 *     Bits 0-14 - upper 15 bits of the next block index
3243 *   entry[1]    - lower 16 bits of next block index
3244 *
3245 * A next block index of all 1 bits means it is the end of the list.
3246 *
3247 * In the unsized bucket (bucket-0), the 3rd and 4th entries contain
3248 * the chain block size:
3249 *
3250 *   entry[2] - upper 16 bits of the chain block size
3251 *   entry[3] - lower 16 bits of the chain block size
3252 */
3253#define MAX_CHAIN_BUCKETS       16
3254#define CHAIN_BLK_FLAG          (1U << 15)
3255#define CHAIN_BLK_LIST_END      0xFFFFU
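
    /*
     * A worked example of the encoding above, with illustrative numbers
     * only: a free block whose successor starts at offset 0x12345 of
     * chain_hlocks[] stores
     *
     *   entry[0] = CHAIN_BLK_FLAG | (0x12345 >> 16) = 0x8001
     *   entry[1] = 0x12345 & 0xFFFF                 = 0x2345
     *
     * and chain_block_next() below reassembles the offset as
     *
     *   next = ((entry[0] & ~CHAIN_BLK_FLAG) << 16) | entry[1];
     *
     * For a bucket-0 block of size 100, entry[2] = 0 and entry[3] = 100,
     * which chain_block_size() recovers as (0 << 16) | 100.
     */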
3256
3257static int chain_block_buckets[MAX_CHAIN_BUCKETS];
3258
3259static inline int size_to_bucket(int size)
3260{
3261        if (size > MAX_CHAIN_BUCKETS)
3262                return 0;
3263
3264        return size - 1;
3265}
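
    /*
     * In other words (a summary, not in the original source): sizes
     * 2..MAX_CHAIN_BUCKETS map to the fixed-size buckets 1..15, e.g.
     * size 2 -> bucket 1 and size 16 -> bucket 15, while bucket 0 holds
     * the variable-sized blocks larger than MAX_CHAIN_BUCKETS. Size-1
     * blocks never reach a freelist, see add_chain_block() below.
     */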
3266
3267/*
3268 * Iterate all the chain blocks in a bucket.
3269 */
3270#define for_each_chain_block(bucket, prev, curr)                \
3271        for ((prev) = -1, (curr) = chain_block_buckets[bucket]; \
3272             (curr) >= 0;                                       \
3273             (prev) = (curr), (curr) = chain_block_next(curr))
3274
3275/*
3276 * next block or -1
3277 */
3278static inline int chain_block_next(int offset)
3279{
3280        int next = chain_hlocks[offset];
3281
3282        WARN_ON_ONCE(!(next & CHAIN_BLK_FLAG));
3283
3284        if (next == CHAIN_BLK_LIST_END)
3285                return -1;
3286
3287        next &= ~CHAIN_BLK_FLAG;
3288        next <<= 16;
3289        next |= chain_hlocks[offset + 1];
3290
3291        return next;
3292}
3293
3294/*
3295 * bucket-0 only
3296 */
3297static inline int chain_block_size(int offset)
3298{
3299        return (chain_hlocks[offset + 2] << 16) | chain_hlocks[offset + 3];
3300}
3301
3302static inline void init_chain_block(int offset, int next, int bucket, int size)
3303{
3304        chain_hlocks[offset] = (next >> 16) | CHAIN_BLK_FLAG;
3305        chain_hlocks[offset + 1] = (u16)next;
3306
3307        if (size && !bucket) {
3308                chain_hlocks[offset + 2] = size >> 16;
3309                chain_hlocks[offset + 3] = (u16)size;
3310        }
3311}
3312
3313static inline void add_chain_block(int offset, int size)
3314{
3315        int bucket = size_to_bucket(size);
3316        int next = chain_block_buckets[bucket];
3317        int prev, curr;
3318
3319        if (unlikely(size < 2)) {
3320                /*
3321                 * We can't store single entries on the freelist. Leak them.
3322                 *
3323                 * One possible way out would be to uniquely mark them, other
3324                 * than with CHAIN_BLK_FLAG, such that we can recover them when
3325                 * the block before it is re-added.
3326                 */
3327                if (size)
3328                        nr_lost_chain_hlocks++;
3329                return;
3330        }
3331
3332        nr_free_chain_hlocks += size;
3333        if (!bucket) {
3334                nr_large_chain_blocks++;
3335
3336                /*
3337                 * Variable sized, sort large to small.
3338                 */
3339                for_each_chain_block(0, prev, curr) {
3340                        if (size >= chain_block_size(curr))
3341                                break;
3342                }
3343                init_chain_block(offset, curr, 0, size);
3344                if (prev < 0)
3345                        chain_block_buckets[0] = offset;
3346                else
3347                        init_chain_block(prev, offset, 0, 0);
3348                return;
3349        }
3350        /*
3351         * Fixed size, add to head.
3352         */
3353        init_chain_block(offset, next, bucket, size);
3354        chain_block_buckets[bucket] = offset;
3355}
3356
3357/*
3358 * Only the first block in the list can be deleted.
3359 *
3360 * For the variable size bucket[0], the first block (the largest one) is
3361 * returned, broken up and put back into the pool. So if a chain block of
3362 * length > MAX_CHAIN_BUCKETS is ever used and zapped, it will just be
3363 * queued up after the primordial chain block and never be used until the
3364 * hlock entries in the primordial chain block are almost used up. That
3365 * causes fragmentation and reduces allocation efficiency. That can be
3366 * monitored by looking at the "large chain blocks" number in lockdep_stats.
3367 */
3368static inline void del_chain_block(int bucket, int size, int next)
3369{
3370        nr_free_chain_hlocks -= size;
3371        chain_block_buckets[bucket] = next;
3372
3373        if (!bucket)
3374                nr_large_chain_blocks--;
3375}
3376
3377static void init_chain_block_buckets(void)
3378{
3379        int i;
3380
3381        for (i = 0; i < MAX_CHAIN_BUCKETS; i++)
3382                chain_block_buckets[i] = -1;
3383
3384        add_chain_block(0, ARRAY_SIZE(chain_hlocks));
3385}
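
    /*
     * After this runs, the whole chain_hlocks[] array forms one primordial
     * free block in bucket 0 - the large block that the comment above
     * del_chain_block() refers to.
     */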
3386
3387/*
3388 * Return offset of a chain block of the right size or -1 if not found.
3389 *
3390 * Fairly simple worst-fit allocator with the addition of a number of size
3391 * specific free lists.
3392 */
3393static int alloc_chain_hlocks(int req)
3394{
3395        int bucket, curr, size;
3396
3397        /*
3398         * We rely on the MSB to act as an escape bit to denote freelist
3399         * pointers. Make sure this bit isn't set in 'normal' class_idx usage.
3400         */
3401        BUILD_BUG_ON((MAX_LOCKDEP_KEYS-1) & CHAIN_BLK_FLAG);
3402
3403        init_data_structures_once();
3404
3405        if (nr_free_chain_hlocks < req)
3406                return -1;
3407
3408        /*
3409         * We require a minimum of 2 (u16) entries to encode a freelist
3410         * 'pointer'.
3411         */
3412        req = max(req, 2);
3413        bucket = size_to_bucket(req);
3414        curr = chain_block_buckets[bucket];
3415
3416        if (bucket) {
3417                if (curr >= 0) {
3418                        del_chain_block(bucket, req, chain_block_next(curr));
3419                        return curr;
3420                }
3421                /* Try bucket 0 */
3422                curr = chain_block_buckets[0];
3423        }
3424
3425        /*
3426         * The variable sized freelist is sorted by size; the first entry is
3427         * the largest. Use it if it fits.
3428         */
3429        if (curr >= 0) {
3430                size = chain_block_size(curr);
3431                if (likely(size >= req)) {
3432                        del_chain_block(0, size, chain_block_next(curr));
3433                        add_chain_block(curr + req, size - req);
3434                        return curr;
3435                }
3436        }
3437
3438        /*
3439         * Last resort, split a block in a larger sized bucket.
3440         */
3441        for (size = MAX_CHAIN_BUCKETS; size > req; size--) {
3442                bucket = size_to_bucket(size);
3443                curr = chain_block_buckets[bucket];
3444                if (curr < 0)
3445                        continue;
3446
3447                del_chain_block(bucket, size, chain_block_next(curr));
3448                add_chain_block(curr + req, size - req);
3449                return curr;
3450        }
3451
3452        return -1;
3453}
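
    /*
     * A hypothetical allocation walk (illustrative only): for
     * alloc_chain_hlocks(3), bucket 2 is tried first for an exact fit.
     * If that bucket is empty, the largest variable-sized block in
     * bucket 0 is used instead: a 100-entry block yields its offset for
     * the request and its remaining 97 entries are re-added as a new
     * free block. Failing that, a block from a larger fixed-size bucket
     * is split the same way.
     */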
3454
3455static inline void free_chain_hlocks(int base, int size)
3456{
3457        add_chain_block(base, max(size, 2));
3458}
3459
3460struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
3461{
3462        u16 chain_hlock = chain_hlocks[chain->base + i];
3463        unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
3464
3465        return lock_classes + class_idx;
3466}
3467
3468/*
3469 * Returns the index of the first held_lock of the current chain
3470 */
3471static inline int get_first_held_lock(struct task_struct *curr,
3472                                        struct held_lock *hlock)
3473{
3474        int i;
3475        struct held_lock *hlock_curr;
3476
3477        for (i = curr->lockdep_depth - 1; i >= 0; i--) {
3478                hlock_curr = curr->held_locks + i;
3479                if (hlock_curr->irq_context != hlock->irq_context)
3480                        break;
3481
3482        }
3483
3484        return ++i;
3485}
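
    /*
     * For example (not from the original source): with held-lock
     * irq_contexts of {0, 0, 1, 1} and @hlock in context 1, the loop
     * above breaks at index 1 and the function returns 2 - the index of
     * the first held lock in the same irq context as @hlock.
     */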
3486
3487#ifdef CONFIG_DEBUG_LOCKDEP
3488/*
3489 * Returns the next chain_key iteration
3490 */
3491static u64 print_chain_key_iteration(u16 hlock_id, u64 chain_key)
3492{
3493        u64 new_chain_key = iterate_chain_key(chain_key, hlock_id);
3494
3495        printk(" hlock_id:%d -> chain_key:%016Lx",
3496                (unsigned int)hlock_id,
3497                (unsigned long long)new_chain_key);
3498        return new_chain_key;
3499}
3500
3501static void
3502print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
3503{
3504        struct held_lock *hlock;
3505        u64 chain_key = INITIAL_CHAIN_KEY;
3506        int depth = curr->lockdep_depth;
3507        int i = get_first_held_lock(curr, hlock_next);
3508
3509        printk("depth: %u (irq_context %u)\n", depth - i + 1,
3510                hlock_next->irq_context);
3511        for (; i < depth; i++) {
3512                hlock = curr->held_locks + i;
3513                chain_key = print_chain_key_iteration(hlock_id(hlock), chain_key);
3514
3515                print_lock(hlock);
3516        }
3517
3518        print_chain_key_iteration(hlock_id(hlock_next), chain_key);
3519        print_lock(hlock_next);
3520}
3521
3522static void print_chain_keys_chain(struct lock_chain *chain)
3523{
3524        int i;
3525        u64 chain_key = INITIAL_CHAIN_KEY;
3526        u16 hlock_id;
3527
3528        printk("depth: %u\n", chain->depth);
3529        for (i = 0; i < chain->depth; i++) {
3530                hlock_id = chain_hlocks[chain->base + i];
3531                chain_key = print_chain_key_iteration(hlock_id, chain_key);
3532
3533                print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
3534                printk("\n");
3535        }
3536}
3537
3538static void print_collision(struct task_struct *curr,
3539                        struct held_lock *hlock_next,
3540                        struct lock_chain *chain)
3541{
3542        pr_warn("\n");
3543        pr_warn("============================\n");
3544        pr_warn("WARNING: chain_key collision\n");
3545        print_kernel_ident();
3546        pr_warn("----------------------------\n");
3547        pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
3548        pr_warn("Hash chain already cached but the contents don't match!\n");
3549
3550        pr_warn("Held locks:");
3551        print_chain_keys_held_locks(curr, hlock_next);
3552
3553        pr_warn("Locks in cached chain:");
3554        print_chain_keys_chain(chain);
3555
3556        pr_warn("\nstack backtrace:\n");
3557        dump_stack();
3558}
3559#endif
3560
3561/*
3562 * Checks whether the chain and the current held locks are consistent
3563 * in depth and also in content. If they are not, it most likely means
3564 * that there was a collision during the calculation of the chain_key.
3565 * Returns: 0 if the check failed, 1 if it passed.
3566 */
3567static int check_no_collision(struct task_struct *curr,
3568                        struct held_lock *hlock,
3569                        struct lock_chain *chain)
3570{
3571#ifdef CONFIG_DEBUG_LOCKDEP
3572        int i, j, id;
3573
3574        i = get_first_held_lock(curr, hlock);
3575
3576        if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
3577                print_collision(curr, hlock, chain);
3578                return 0;
3579        }
3580
3581        for (j = 0; j < chain->depth - 1; j++, i++) {
3582                id = hlock_id(&curr->held_locks[i]);
3583
3584                if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
3585                        print_collision(curr, hlock, chain);
3586                        return 0;
3587                }
3588        }
3589#endif
3590        return 1;
3591}
3592
3593/*
3594 * Given an index that is >= -1, return the index of the next lock chain.
3595 * Return -2 if there is no next lock chain.
3596 */
3597long lockdep_next_lockchain(long i)
3598{
3599        i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
3600        return i < ARRAY_SIZE(lock_chains) ? i : -2;
3601}
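
    /*
     * A minimal usage sketch (assumed; it mirrors how a seq_file iterator
     * might walk the chains, and process() is a hypothetical callback):
     *
     *   long i;
     *
     *   for (i = lockdep_next_lockchain(-1); i != -2;
     *        i = lockdep_next_lockchain(i))
     *           process(lock_chains + i);
     */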
3602
3603unsigned long lock_chain_count(void)
3604{
3605        return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
3606}
3607
3608/* Must be called with the graph lock held. */
3609static struct lock_chain *alloc_lock_chain(void)
3610{
3611        int idx = find_first_zero_bit(lock_chains_in_use,
3612                                      ARRAY_SIZE(lock_chains));
3613
3614        if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
3615                return NULL;
3616        __set_bit(idx, lock_chains_in_use);
3617        return lock_chains + idx;
3618}
3619
3620/*
3621 * Adds a dependency chain into the chain hashtable. Must be called with
3622 * graph_lock held.
3623 *
3624 * Returns 0 on failure, with graph_lock released.
3625 * Returns 1 on success, with graph_lock held.
3626 */
3627static inline int add_chain_cache(struct task_struct *curr,
3628                                  struct held_lock *hlock,
3629                                  u64 chain_key)
3630{
3631        struct hlist_head *hash_head = chainhashentry(chain_key);
3632        struct lock_chain *chain;
3633        int i, j;
3634
3635        /*
3636         * The caller must hold the graph lock and ensure we've got IRQs
3637         * disabled to make this an IRQ-safe lock. For recursion reasons,
3638         * lockdep won't complain about its own locking errors.
3639         */
3640        if (lockdep_assert_locked())
3641                return 0;
3642
3643        chain = alloc_lock_chain();
3644        if (!chain) {
3645                if (!debug_locks_off_graph_unlock())
3646                        return 0;
3647
3648                print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
3649                dump_stack();
3650                return 0;
3651        }
3652        chain->chain_key = chain_key;
3653        chain->irq_context = hlock->irq_context;
3654        i = get_first_held_lock(curr, hlock);
3655        chain->depth = curr->lockdep_depth + 1 - i;
3656
3657        BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
3658        BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
3659        BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
3660
3661        j = alloc_chain_hlocks(chain->depth);
3662        if (j < 0) {
3663                if (!debug_locks_off_graph_unlock())
3664                        return 0;
3665
3666                print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
3667                dump_stack();
3668                return 0;
3669        }
3670
3671        chain->base = j;
3672        for (j = 0; j < chain->depth - 1; j++, i++) {
3673                int lock_id = hlock_id(curr->held_locks + i);
3674
3675                chain_hlocks[chain->base + j] = lock_id;
3676        }
3677        chain_hlocks[chain->base + j] = hlock_id(hlock);
3678        hlist_add_head_rcu(&chain->entry, hash_head);
3679        debug_atomic_inc(chain_lookup_misses);
3680        inc_chains(chain->irq_context);
3681
3682        return 1;
3683}
3684
3685/*
3686 * Look up a dependency chain. Must be called with either the graph lock or
3687 * the RCU read lock held.
3688 */
3689static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
3690{
3691        struct hlist_head *hash_head = chainhashentry(chain_key);
3692        struct lock_chain *chain;
3693
3694        hlist_for_each_entry_rcu(chain, hash_head, entry) {
3695                if (READ_ONCE(chain->chain_key) == chain_key) {
3696                        debug_atomic_inc(chain_lookup_hits);
3697                        return chain;
3698                }
3699        }
3700        return NULL;
3701}
3702
3703/*
3704 * If the key is not yet present in the dependency chain cache, then
3705 * add it and return 1 - in this case the new dependency chain is
3706 * validated. If the key is already hashed, return 0.
3707 * (On a return of 1, graph_lock is held.)
3708 */
3709static inline int lookup_chain_cache_add(struct task_struct *curr,
3710                                         struct held_lock *hlock,
3711                                         u64 chain_key)
3712{
3713        struct lock_class *class = hlock_class(hlock);
3714        struct lock_chain *chain = lookup_chain_cache(chain_key);
3715
3716        if (chain) {
3717cache_hit:
3718                if (!check_no_collision(curr, hlock, chain))
3719                        return 0;
3720
3721                if (very_verbose(class)) {
3722                        printk("\nhash chain already cached, key: "
3723                                        "%016Lx tail class: [%px] %s\n",
3724                                        (unsigned long long)chain_key,
3725                                        class->key, class->name);
3726                }
3727
3728                return 0;
3729        }
3730
3731        if (very_verbose(class)) {
3732                printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
3733                        (unsigned long long)chain_key, class->key, class->name);
3734        }
3735
3736        if (!graph_lock())
3737                return 0;
3738
3739        /*
3740         * We have to walk the chain again locked - to avoid duplicates:
3741         */
3742        chain = lookup_chain_cache(chain_key);
3743        if (chain) {
3744                graph_unlock();
3745                goto cache_hit;
3746        }
3747
3748        if (!add_chain_cache(curr, hlock, chain_key))
3749                return 0;
3750
3751        return 1;
3752}
3753
3754static int validate_chain(struct task_struct *curr,
3755                          struct held_lock *hlock,
3756                          int chain_head, u64 chain_key)
3757{
3758        /*
3759         * Trylock needs to maintain the stack of held locks, but it
3760         * does not add new dependencies, because trylock can be done
3761         * in any order.
3762         *
3763         * We look up the chain_key and do the O(N^2) check and update of
3764         * the dependencies only if this is a new dependency chain.
3765         * (If lookup_chain_cache_add() returns 1, it acquires
3766         * graph_lock for us.)
3767         */
3768        if (!hlock->trylock && hlock->check &&
3769            lookup_chain_cache_add(curr, hlock, chain_key)) {
3770                /*
3771                 * Check whether last held lock:
3772                 *
3773                 * - is irq-safe, if this lock is irq-unsafe
3774                 * - is softirq-safe, if this lock is hardirq-unsafe
3775                 *
3776                 * And check whether the new lock's dependency graph
3777                 * could lead back to the previous lock:
3778                 *
3779                 * - within the current held-lock stack
3780                 * - across our accumulated lock dependency records
3781                 *
3782                 * any of these scenarios could lead to a deadlock.
3783                 */
3784                /*
3785                 * The simple case: does the current task hold the same
3786                 * lock already?
3787                 */
3788                int ret = check_deadlock(curr, hlock);
3789
3790                if (!ret)
3791                        return 0;
3792                /*
3793                 * Add dependency only if this lock is not the head
3794                 * of the chain, and if the new lock introduces no more
3795                 * lock dependency (because we already hold a lock with the
3796                 * same lock class) nor deadlock (because the nest_lock
3797                 * serializes nesting locks), see the comments for
3798                 * check_deadlock().
3799                 */
3800                if (!chain_head && ret != 2) {
3801                        if (!check_prevs_add(curr, hlock))
3802                                return 0;
3803                }
3804
3805                graph_unlock();
3806        } else {
3807                /* after lookup_chain_cache_add(): */
3808                if (unlikely(!debug_locks))
3809                        return 0;
3810        }
3811
3812        return 1;
3813}
3814#else
3815static inline int validate_chain(struct task_struct *curr,
3816                                 struct held_lock *hlock,
3817                                 int chain_head, u64 chain_key)
3818{
3819        return 1;
3820}
3821
3822static void init_chain_block_buckets(void)      { }
3823#endif /* CONFIG_PROVE_LOCKING */
3824
3825/*
3826 * We are building curr_chain_key incrementally, so double-check
3827 * it from scratch, to make sure that it's done correctly:
3828 */
3829static void check_chain_key(struct task_struct *curr)
3830{
3831#ifdef CONFIG_DEBUG_LOCKDEP
3832        struct held_lock *hlock, *prev_hlock = NULL;
3833        unsigned int i;
3834        u64 chain_key = INITIAL_CHAIN_KEY;
3835
3836        for (i = 0; i < curr->lockdep_depth; i++) {
3837                hlock = curr->held_locks + i;
3838                if (chain_key != hlock->prev_chain_key) {
3839                        debug_locks_off();
3840                        /*
3841                         * We got mighty confused, our chain keys don't match
3842                         * what we expect. Did someone trample on our task state?
3843                         */
3844                        WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
3845                                curr->lockdep_depth, i,
3846                                (unsigned long long)chain_key,
3847                                (unsigned long long)hlock->prev_chain_key);
3848                        return;
3849                }
3850
3851                /*
3852                 * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is
3853                 * it a registered lock class index?
3854                 */
3855                if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
3856                        return;
3857
3858                if (prev_hlock && (prev_hlock->irq_context !=
3859                                                        hlock->irq_context))
3860                        chain_key = INITIAL_CHAIN_KEY;
3861                chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
3862                prev_hlock = hlock;
3863        }
3864        if (chain_key != curr->curr_chain_key) {
3865                debug_locks_off();
3866                /*
3867                 * More smoking hash instead of calculating it, damn see these
3868                 * numbers float.. I bet that a pink elephant stepped on my memory.
3869                 */
3870                WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
3871                        curr->lockdep_depth, i,
3872                        (unsigned long long)chain_key,
3873                        (unsigned long long)curr->curr_chain_key);
3874        }
3875#endif
3876}
3877
3878#ifdef CONFIG_PROVE_LOCKING
3879static int mark_lock(struct task_struct *curr, struct held_lock *this,
3880                     enum lock_usage_bit new_bit);
3881
3882static void print_usage_bug_scenario(struct held_lock *lock)
3883{
3884        struct lock_class *class = hlock_class(lock);
3885
3886        printk(" Possible unsafe locking scenario:\n\n");
3887        printk("       CPU0\n");
3888        printk("       ----\n");
3889        printk("  lock(");
3890        __print_lock_name(class);
3891        printk(KERN_CONT ");\n");
3892        printk("  <Interrupt>\n");
3893        printk("    lock(");
3894        __print_lock_name(class);
3895        printk(KERN_CONT ");\n");
3896        printk("\n *** DEADLOCK ***\n\n");
3897}
3898
3899static void
3900print_usage_bug(struct task_struct *curr, struct held_lock *this,
3901                enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
3902{
3903        if (!debug_locks_off() || debug_locks_silent)
3904                return;
3905
3906        pr_warn("\n");
3907        pr_warn("================================\n");
3908        pr_warn("WARNING: inconsistent lock state\n");
3909        print_kernel_ident();
3910        pr_warn("--------------------------------\n");
3911
3912        pr_warn("inconsistent {%s} -> {%s} usage.\n",
3913                usage_str[prev_bit], usage_str[new_bit]);
3914
3915        pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
3916                curr->comm, task_pid_nr(curr),
3917                lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
3918                lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
3919                lockdep_hardirqs_enabled(),
3920                lockdep_softirqs_enabled(curr));
3921        print_lock(this);
3922
3923        pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
3924        print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1);
3925
3926        print_irqtrace_events(curr);
3927        pr_warn("\nother info that might help us debug this:\n");
3928        print_usage_bug_scenario(this);
3929
3930        lockdep_print_held_locks(curr);
3931
3932        pr_warn("\nstack backtrace:\n");
3933        dump_stack();
3934}
3935
3936/*
3937 * Print out an error if an invalid bit is set:
3938 */
3939static inline int
3940valid_state(struct task_struct *curr, struct held_lock *this,
3941            enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
3942{
3943        if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
3944                graph_unlock();
3945                print_usage_bug(curr, this, bad_bit, new_bit);
3946                return 0;
3947        }
3948        return 1;
3949}
3950
3951
3952/*
3953 * print irq inversion bug:
3954 */
3955static void
3956print_irq_inversion_bug(struct task_struct *curr,
3957                        struct lock_list *root, struct lock_list *other,
3958                        struct held_lock *this, int forwards,
3959                        const char *irqclass)
3960{
3961        struct lock_list *entry = other;
3962        struct lock_list *middle = NULL;
3963        int depth;
3964
3965        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
3966                return;
3967
3968        pr_warn("\n");
3969        pr_warn("========================================================\n");
3970        pr_warn("WARNING: possible irq lock inversion dependency detected\n");
3971        print_kernel_ident();
3972        pr_warn("--------------------------------------------------------\n");
3973        pr_warn("%s/%d just changed the state of lock:\n",
3974                curr->comm, task_pid_nr(curr));
3975        print_lock(this);
3976        if (forwards)
3977                pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
3978        else
3979                pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
3980        print_lock_name(other->class);
3981        pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
3982
3983        pr_warn("\nother info that might help us debug this:\n");
3984
3985        /* Find a middle lock (if one exists) */
3986        depth = get_lock_depth(other);
3987        do {
3988                if (depth == 0 && (entry != root)) {
3989                        pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
3990                        break;
3991                }
3992                middle = entry;
3993                entry = get_lock_parent(entry);
3994                depth--;
3995        } while (entry && entry != root && (depth >= 0));
3996        if (forwards)
3997                print_irq_lock_scenario(root, other,
3998                        middle ? middle->class : root->class, other->class);
3999        else
4000                print_irq_lock_scenario(other, root,
4001                        middle ? middle->class : other->class, root->class);
4002
4003        lockdep_print_held_locks(curr);
4004
4005        pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
4006        root->trace = save_trace();
4007        if (!root->trace)
4008                return;
4009        print_shortest_lock_dependencies(other, root);
4010
4011        pr_warn("\nstack backtrace:\n");
4012        dump_stack();
4013}
4014
4015/*
4016 * Prove that in the forwards-direction subgraph starting at <this>
4017 * there is no lock matching <mask>:
4018 */
4019static int
4020check_usage_forwards(struct task_struct *curr, struct held_lock *this,
4021                     enum lock_usage_bit bit)
4022{
4023        enum bfs_result ret;
4024        struct lock_list root;
4025        struct lock_list *target_entry;
4026        enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
4027        unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
4028
4029        bfs_init_root(&root, this);
4030        ret = find_usage_forwards(&root, usage_mask, &target_entry);
4031        if (bfs_error(ret)) {
4032                print_bfs_bug(ret);
4033                return 0;
4034        }
4035        if (ret == BFS_RNOMATCH)
4036                return 1;
4037
4038        /* Check whether write or read usage is the match */
4039        if (target_entry->class->usage_mask & lock_flag(bit)) {
4040                print_irq_inversion_bug(curr, &root, target_entry,
4041                                        this, 1, state_name(bit));
4042        } else {
4043                print_irq_inversion_bug(curr, &root, target_entry,
4044                                        this, 1, state_name(read_bit));
4045        }
4046
4047        return 0;
4048}
4049
4050/*
4051 * Prove that in the backwards-direction subgraph starting at <this>
4052 * there is no lock matching <mask>:
4053 */
4054static int
4055check_usage_backwards(struct task_struct *curr, struct held_lock *this,
4056                      enum lock_usage_bit bit)
4057{
4058        enum bfs_result ret;
4059        struct lock_list root;
4060        struct lock_list *target_entry;
4061        enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
4062        unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
4063
4064        bfs_init_rootb(&root, this);
4065        ret = find_usage_backwards(&root, usage_mask, &target_entry);
4066        if (bfs_error(ret)) {
4067                print_bfs_bug(ret);
4068                return 0;
4069        }
4070        if (ret == BFS_RNOMATCH)
4071                return 1;
4072
4073        /* Check whether write or read usage is the match */
4074        if (target_entry->class->usage_mask & lock_flag(bit)) {
4075                print_irq_inversion_bug(curr, &root, target_entry,
4076                                        this, 0, state_name(bit));
4077        } else {
4078                print_irq_inversion_bug(curr, &root, target_entry,
4079                                        this, 0, state_name(read_bit));
4080        }
4081
4082        return 0;
4083}
4084
4085void print_irqtrace_events(struct task_struct *curr)
4086{
4087        const struct irqtrace_events *trace = &curr->irqtrace;
4088
4089        printk("irq event stamp: %u\n", trace->irq_events);
4090        printk("hardirqs last  enabled at (%u): [<%px>] %pS\n",
4091                trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
4092                (void *)trace->hardirq_enable_ip);
4093        printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
4094                trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip,
4095                (void *)trace->hardirq_disable_ip);
4096        printk("softirqs last  enabled at (%u): [<%px>] %pS\n",
4097                trace->softirq_enable_event, (void *)trace->softirq_enable_ip,
4098                (void *)trace->softirq_enable_ip);
4099        printk("softirqs last disabled at (%u): [<%px>] %pS\n",
4100                trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
4101                (void *)trace->softirq_disable_ip);
4102}
4103
4104static int HARDIRQ_verbose(struct lock_class *class)
4105{
4106#if HARDIRQ_VERBOSE
4107        return class_filter(class);
4108#endif
4109        return 0;
4110}
4111
4112static int SOFTIRQ_verbose(struct lock_class *class)
4113{
4114#if SOFTIRQ_VERBOSE
4115        return class_filter(class);
4116#endif
4117        return 0;
4118}
4119
4120static int (*state_verbose_f[])(struct lock_class *class) = {
4121#define LOCKDEP_STATE(__STATE) \
4122        __STATE##_verbose,
4123#include "lockdep_states.h"
4124#undef LOCKDEP_STATE
4125};
4126
4127static inline int state_verbose(enum lock_usage_bit bit,
4128                                struct lock_class *class)
4129{
4130        return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
4131}
4132
4133typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
4134                             enum lock_usage_bit bit, const char *name);
4135
4136static int
4137mark_lock_irq(struct task_struct *curr, struct held_lock *this,
4138                enum lock_usage_bit new_bit)
4139{
4140        int excl_bit = exclusive_bit(new_bit);
4141        int read = new_bit & LOCK_USAGE_READ_MASK;
4142        int dir = new_bit & LOCK_USAGE_DIR_MASK;
4143
4144        /*
4145         * Validate that this particular lock does not have conflicting
4146         * usage states.
4147         */
4148        if (!valid_state(curr, this, new_bit, excl_bit))
4149                return 0;
4150
4151        /*
4152         * Check for read in write conflicts
4153         */
4154        if (!read && !valid_state(curr, this, new_bit,
4155                                  excl_bit + LOCK_USAGE_READ_MASK))
4156                return 0;
4157
4158
4159        /*
4160         * Validate that the lock dependencies don't have conflicting usage
4161         * states.
4162         */
4163        if (dir) {
4164                /*
4165                 * mark ENABLED has to look backwards -- to ensure no dependee
4166                 * has USED_IN state, which, again, would allow recursion deadlocks.
4167                 */
4168                if (!check_usage_backwards(curr, this, excl_bit))
4169                        return 0;
4170        } else {
4171                /*
4172                 * mark USED_IN has to look forwards -- to ensure no dependency
4173                 * has ENABLED state, which would allow recursion deadlocks.
4174                 */
4175                if (!check_usage_forwards(curr, this, excl_bit))
4176                        return 0;
4177        }
4178
4179        if (state_verbose(new_bit, hlock_class(this)))
4180                return 2;
4181
4182        return 1;
4183}
4184
4185/*
4186 * Mark all held locks with a usage bit:
4187 */
4188static int
4189mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
4190{
4191        struct held_lock *hlock;
4192        int i;
4193
4194        for (i = 0; i < curr->lockdep_depth; i++) {
4195                enum lock_usage_bit hlock_bit = base_bit;
4196                hlock = curr->held_locks + i;
4197
4198                if (hlock->read)
4199                        hlock_bit += LOCK_USAGE_READ_MASK;
4200
4201                BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
4202
4203                if (!hlock->check)
4204                        continue;
4205
4206                if (!mark_lock(curr, hlock, hlock_bit))
4207                        return 0;
4208        }
4209
4210        return 1;
4211}
4212
4213/*
4214 * Hardirqs will be enabled:
4215 */
4216static void __trace_hardirqs_on_caller(void)
4217{
4218        struct task_struct *curr = current;
4219
4220        /*
4221         * We are going to turn hardirqs on, so set the
4222         * usage bit for all held locks:
4223         */
4224        if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
4225                return;
4226        /*
4227         * If we have softirqs enabled, then set the usage
4228         * bit for all held locks. (disabled hardirqs prevented
4229         * this bit from being set before)
4230         */
4231        if (curr->softirqs_enabled)
4232                mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
4233}
4234
4235/**
4236 * lockdep_hardirqs_on_prepare - Prepare for enabling interrupts
4237 * @ip:         Caller address
4238 *
4239 * Invoked before a possible transition to RCU idle from exit to user or
4240 * guest mode. This ensures that all RCU operations are done before RCU
4241 * stops watching. After the RCU transition lockdep_hardirqs_on() has to be
4242 * invoked to set the final state.
4243 */
4244void lockdep_hardirqs_on_prepare(unsigned long ip)
4245{
4246        if (unlikely(!debug_locks))
4247                return;
4248
4249        /*
4250         * NMIs do not (and cannot) track lock dependencies, nothing to do.
4251         */
4252        if (unlikely(in_nmi()))
4253                return;
4254
4255        if (unlikely(this_cpu_read(lockdep_recursion)))
4256                return;
4257
4258        if (unlikely(lockdep_hardirqs_enabled())) {
4259                /*
4260                 * Neither irqs nor preemption are disabled here,
4261                 * so this is racy by nature, but losing one hit
4262                 * in a stat is not a big deal.
4263                 */
4264                __debug_atomic_inc(redundant_hardirqs_on);
4265                return;
4266        }
4267
4268        /*
4269         * We're enabling irqs and according to our state above irqs weren't
4270         * already enabled, yet we find the hardware thinks they are in fact
4271         * enabled.. someone messed up their IRQ state tracing.
4272         */
4273        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4274                return;
4275
4276        /*
4277         * See the fine text that goes along with this variable definition.
4278         */
4279        if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
4280                return;
4281
4282        /*
4283         * Can't allow enabling interrupts while in an interrupt handler,
4284         * that's general bad form and such. Recursion, limited stack etc..
4285         */
4286        if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
4287                return;
4288
4289        current->hardirq_chain_key = current->curr_chain_key;
4290
4291        lockdep_recursion_inc();
4292        __trace_hardirqs_on_caller();
4293        lockdep_recursion_finish();
4294}
4295EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
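
    /*
     * A sketch of the intended call sequence (assumed; the exact entry
     * points are architecture and config specific):
     *
     *   lockdep_hardirqs_on_prepare(ip);  // while RCU is still watching
     *   ...                               // possible RCU-idle transition
     *   lockdep_hardirqs_on(ip);          // commit the software state
     *   raw_local_irq_enable();           // hardware actually enables IRQs
     */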
4296
4297void noinstr lockdep_hardirqs_on(unsigned long ip)
4298{
4299        struct irqtrace_events *trace = &current->irqtrace;
4300
4301        if (unlikely(!debug_locks))
4302                return;
4303
4304        /*
4305         * NMIs can happen in the middle of local_irq_{en,dis}able() where the
4306         * tracking state and hardware state are out of sync.
4307         *
4308         * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from,
4309         * and not rely on hardware state like normal interrupts.
4310         */
4311        if (unlikely(in_nmi())) {
4312                if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
4313                        return;
4314
4315                /*
4316                 * Skip:
4317                 *  - recursion check, because NMI can hit lockdep;
4318                 *  - hardware state check, because above;
4319                 *  - chain_key check, see lockdep_hardirqs_on_prepare().
4320                 */
4321                goto skip_checks;
4322        }
4323
4324        if (unlikely(this_cpu_read(lockdep_recursion)))
4325                return;
4326
4327        if (lockdep_hardirqs_enabled()) {
4328                /*
4329                 * Neither irqs nor preemption are disabled here,
4330                 * so this is racy by nature, but losing one hit
4331                 * in a stat is not a big deal.
4332                 */
4333                __debug_atomic_inc(redundant_hardirqs_on);
4334                return;
4335        }
4336
4337        /*
4338         * We're enabling irqs and according to our state above irqs weren't
4339         * already enabled, yet we find the hardware thinks they are in fact
4340         * enabled.. someone messed up their IRQ state tracing.
4341         */
4342        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4343                return;
4344
4345        /*
4346         * Ensure the lock stack remained unchanged between
4347         * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on().
4348         */
4349        DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
4350                            current->curr_chain_key);
4351
4352skip_checks:
4353        /* we'll do an OFF -> ON transition: */
4354        __this_cpu_write(hardirqs_enabled, 1);
4355        trace->hardirq_enable_ip = ip;
4356        trace->hardirq_enable_event = ++trace->irq_events;
4357        debug_atomic_inc(hardirqs_on_events);
4358}
4359EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
4360
4361/*
4362 * Hardirqs were disabled:
4363 */
4364void noinstr lockdep_hardirqs_off(unsigned long ip)
4365{
4366        if (unlikely(!debug_locks))
4367                return;
4368
4369        /*
4370         * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep;
4371         * they will restore the software state. This ensures the software
4372         * state is consistent inside NMIs as well.
4373         */
4374        if (in_nmi()) {
4375                if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
4376                        return;
4377        } else if (__this_cpu_read(lockdep_recursion))
4378                return;
4379
4380        /*
4381         * So we're supposed to get called after you mask local IRQs, but for
4382         * some reason the hardware doesn't quite think you did a proper job.
4383         */
4384        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4385                return;
4386
4387        if (lockdep_hardirqs_enabled()) {
4388                struct irqtrace_events *trace = &current->irqtrace;
4389
4390                /*
4391                 * We have done an ON -> OFF transition:
4392                 */
4393                __this_cpu_write(hardirqs_enabled, 0);
4394                trace->hardirq_disable_ip = ip;
4395                trace->hardirq_disable_event = ++trace->irq_events;
4396                debug_atomic_inc(hardirqs_off_events);
4397        } else {
4398                debug_atomic_inc(redundant_hardirqs_off);
4399        }
4400}
4401EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
4402
4403/*
4404 * Softirqs will be enabled:
4405 */
4406void lockdep_softirqs_on(unsigned long ip)
4407{
4408        struct irqtrace_events *trace = &current->irqtrace;
4409
4410        if (unlikely(!lockdep_enabled()))
4411                return;
4412
4413        /*
4414         * We fancy IRQs being disabled here, see softirq.c; it avoids
4415         * funny state and nesting things.
4416         */
4417        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4418                return;
4419
4420        if (current->softirqs_enabled) {
4421                debug_atomic_inc(redundant_softirqs_on);
4422                return;
4423        }
4424
4425        lockdep_recursion_inc();
4426        /*
4427         * We'll do an OFF -> ON transition:
4428         */
4429        current->softirqs_enabled = 1;
4430        trace->softirq_enable_ip = ip;
4431        trace->softirq_enable_event = ++trace->irq_events;
4432        debug_atomic_inc(softirqs_on_events);
4433        /*
4434         * We are going to turn softirqs on, so set the
4435         * usage bit for all held locks, if hardirqs are
4436         * enabled too:
4437         */
4438        if (lockdep_hardirqs_enabled())
4439                mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
4440        lockdep_recursion_finish();
4441}
4442
4443/*
4444 * Softirqs were disabled:
4445 */
4446void lockdep_softirqs_off(unsigned long ip)
4447{
4448        if (unlikely(!lockdep_enabled()))
4449                return;
4450
4451        /*
4452         * We fancy IRQs being disabled here, see softirq.c
4453         */
4454        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4455                return;
4456
4457        if (current->softirqs_enabled) {
4458                struct irqtrace_events *trace = &current->irqtrace;
4459
4460                /*
4461                 * We have done an ON -> OFF transition:
4462                 */
4463                current->softirqs_enabled = 0;
4464                trace->softirq_disable_ip = ip;
4465                trace->softirq_disable_event = ++trace->irq_events;
4466                debug_atomic_inc(softirqs_off_events);
4467                /*
4468                 * Whoops, we wanted softirqs off, so why aren't they?
4469                 */
4470                DEBUG_LOCKS_WARN_ON(!softirq_count());
4471        } else
4472                debug_atomic_inc(redundant_softirqs_off);
4473}
4474
4475static int
4476mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4477{
4478        if (!check)
4479                goto lock_used;
4480
4481        /*
4482         * If non-trylock use in a hardirq or softirq context, then
4483         * mark the lock as used in these contexts:
4484         */
4485        if (!hlock->trylock) {
4486                if (hlock->read) {
4487                        if (lockdep_hardirq_context())
4488                                if (!mark_lock(curr, hlock,
4489                                                LOCK_USED_IN_HARDIRQ_READ))
4490                                        return 0;
4491                        if (curr->softirq_context)
4492                                if (!mark_lock(curr, hlock,
4493                                                LOCK_USED_IN_SOFTIRQ_READ))
4494                                        return 0;
4495                } else {
4496                        if (lockdep_hardirq_context())
4497                                if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
4498                                        return 0;
4499                        if (curr->softirq_context)
4500                                if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
4501                                        return 0;
4502                }
4503        }
4504        if (!hlock->hardirqs_off) {
4505                if (hlock->read) {
4506                        if (!mark_lock(curr, hlock,
4507                                        LOCK_ENABLED_HARDIRQ_READ))
4508                                return 0;
4509                        if (curr->softirqs_enabled)
4510                                if (!mark_lock(curr, hlock,
4511                                                LOCK_ENABLED_SOFTIRQ_READ))
4512                                        return 0;
4513                } else {
4514                        if (!mark_lock(curr, hlock,
4515                                        LOCK_ENABLED_HARDIRQ))
4516                                return 0;
4517                        if (curr->softirqs_enabled)
4518                                if (!mark_lock(curr, hlock,
4519                                                LOCK_ENABLED_SOFTIRQ))
4520                                        return 0;
4521                }
4522        }
4523
4524lock_used:
4525        /* mark it as used: */
4526        if (!mark_lock(curr, hlock, LOCK_USED))
4527                return 0;
4528
4529        return 1;
4530}
4531
4532static inline unsigned int task_irq_context(struct task_struct *task)
4533{
4534        return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
4535               LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
4536}
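
    /*
     * For instance (illustrative): a task holding locks in a softirq that
     * was itself interrupted by a hardirq has both flags set, i.e.
     * LOCK_CHAIN_HARDIRQ_CONTEXT + LOCK_CHAIN_SOFTIRQ_CONTEXT, while plain
     * process context yields 0.
     */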
4537
4538static int separate_irq_context(struct task_struct *curr,
4539                struct held_lock *hlock)
4540{
4541        unsigned int depth = curr->lockdep_depth;
4542
4543        /*
4544         * Keep track of points where we cross into an interrupt context:
4545         */
4546        if (depth) {
4547                struct held_lock *prev_hlock;
4548
4549                prev_hlock = curr->held_locks + depth-1;
4550                /*
4551                 * If we cross into another context, reset the
4552                 * hash key (this also prevents the checking and the
4553                 * adding of the dependency to 'prev'):
4554                 */
4555                if (prev_hlock->irq_context != hlock->irq_context)
4556                        return 1;
4557        }
4558        return 0;
4559}
4560
4561/*
4562 * Mark a lock with a usage bit, and validate the state transition:
4563 */
4564static int mark_lock(struct task_struct *curr, struct held_lock *this,
4565                             enum lock_usage_bit new_bit)
4566{
4567        unsigned int new_mask, ret = 1;
4568
4569        if (new_bit >= LOCK_USAGE_STATES) {
4570                DEBUG_LOCKS_WARN_ON(1);
4571                return 0;
4572        }
4573
4574        if (new_bit == LOCK_USED && this->read)
4575                new_bit = LOCK_USED_READ;
4576
4577        new_mask = 1 << new_bit;
4578
4579        /*
4580         * If already set then do not dirty the cacheline,
4581         * nor do any checks:
4582         */
4583        if (likely(hlock_class(this)->usage_mask & new_mask))
4584                return 1;
4585
4586        if (!graph_lock())
4587                return 0;
4588        /*
4589         * Make sure we didn't race:
4590         */
4591        if (unlikely(hlock_class(this)->usage_mask & new_mask))
4592                goto unlock;
4593
4594        if (!hlock_class(this)->usage_mask)
4595                debug_atomic_dec(nr_unused_locks);
4596
4597        hlock_class(this)->usage_mask |= new_mask;
4598
4599        if (new_bit < LOCK_TRACE_STATES) {
4600                if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
4601                        return 0;
4602        }
4603
4604        if (new_bit < LOCK_USED) {
4605                ret = mark_lock_irq(curr, this, new_bit);
4606                if (!ret)
4607                        return 0;
4608        }
4609
4610unlock:
4611        graph_unlock();
4612
4613        /*
4614         * We must printk outside of the graph_lock:
4615         */
4616        if (ret == 2) {
4617                printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
4618                print_lock(this);
4619                print_irqtrace_events(curr);
4620                dump_stack();
4621        }
4622
4623        return ret;
4624}
4625
4626static inline short task_wait_context(struct task_struct *curr)
4627{
4628        /*
4629         * Set appropriate wait type for the context; for IRQs we have to take
4630         * into account force_irqthreads, as that is implied by PREEMPT_RT.
4631         */
4632        if (lockdep_hardirq_context()) {
4633                /*
4634                 * Check if force_irqthreads will run us threaded.
4635                 */
4636                if (curr->hardirq_threaded || curr->irq_config)
4637                        return LD_WAIT_CONFIG;
4638
4639                return LD_WAIT_SPIN;
4640        } else if (curr->softirq_context) {
4641                /*
4642                 * Softirqs are always threaded.
4643                 */
4644                return LD_WAIT_CONFIG;
4645        }
4646
4647        return LD_WAIT_MAX;
4648}
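
/*
 * In short, the mapping implemented above is:
 *
 *	hardirq context, force-threaded	-> LD_WAIT_CONFIG
 *	hardirq context, not threaded	-> LD_WAIT_SPIN
 *	softirq context			-> LD_WAIT_CONFIG (always threaded)
 *	plain task context		-> LD_WAIT_MAX (no restriction)
 */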
4649
4650static int
4651print_lock_invalid_wait_context(struct task_struct *curr,
4652                                struct held_lock *hlock)
4653{
4654        short curr_inner;
4655
4656        if (!debug_locks_off())
4657                return 0;
4658        if (debug_locks_silent)
4659                return 0;
4660
4661        pr_warn("\n");
4662        pr_warn("=============================\n");
4663        pr_warn("[ BUG: Invalid wait context ]\n");
4664        print_kernel_ident();
4665        pr_warn("-----------------------------\n");
4666
4667        pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
4668        print_lock(hlock);
4669
4670        pr_warn("other info that might help us debug this:\n");
4671
4672        curr_inner = task_wait_context(curr);
4673        pr_warn("context-{%d:%d}\n", curr_inner, curr_inner);
4674
4675        lockdep_print_held_locks(curr);
4676
4677        pr_warn("stack backtrace:\n");
4678        dump_stack();
4679
4680        return 0;
4681}
4682
4683/*
4684 * Verify the wait_type context.
4685 *
4686 * This check validates that we take locks in the right wait-type order; that
4687 * is, it ensures that we do not take mutexes inside spinlocks and do not
4688 * attempt to acquire spinlocks inside raw_spinlocks, and the like.
4689 *
4690 * The entire thing is slightly more complex because of RCU: RCU is a lock that
4691 * can be taken from (pretty much) any context, but it also has constraints of
4692 * its own. However, when taken in a stricter environment, the RCU lock does
4693 * not loosen the constraints of that environment.
4694 *
4695 * Therefore we must look for the strictest environment in the lock stack and
4696 * compare that to the lock we're trying to acquire.
4697 */
4698static int check_wait_context(struct task_struct *curr, struct held_lock *next)
4699{
4700        u8 next_inner = hlock_class(next)->wait_type_inner;
4701        u8 next_outer = hlock_class(next)->wait_type_outer;
4702        u8 curr_inner;
4703        int depth;
4704
4705        if (!next_inner || next->trylock)
4706                return 0;
4707
4708        if (!next_outer)
4709                next_outer = next_inner;
4710
4711        /*
4712         * Find start of current irq_context..
4713         */
4714        for (depth = curr->lockdep_depth - 1; depth >= 0; depth--) {
4715                struct held_lock *prev = curr->held_locks + depth;
4716                if (prev->irq_context != next->irq_context)
4717                        break;
4718        }
4719        depth++;
4720
4721        curr_inner = task_wait_context(curr);
4722
4723        for (; depth < curr->lockdep_depth; depth++) {
4724                struct held_lock *prev = curr->held_locks + depth;
4725                u8 prev_inner = hlock_class(prev)->wait_type_inner;
4726
4727                if (prev_inner) {
4728                        /*
4729                         * We can have a bigger inner than a previous one
4730                         * when outer is smaller than inner, as with RCU.
4731                         *
4732                         * Also due to trylocks.
4733                         */
4734                        curr_inner = min(curr_inner, prev_inner);
4735                }
4736        }
4737
4738        if (next_outer > curr_inner)
4739                return print_lock_invalid_wait_context(curr, next);
4740
4741        return 0;
4742}
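
/*
 * Illustrative sketch of the nesting this check rejects; the my_* lock
 * names are hypothetical, the wait types follow from the primitives used:
 *
 *	raw_spin_lock(&my_raw_lock);	// curr_inner becomes LD_WAIT_SPIN
 *	spin_lock(&my_lock);		// spinlock_t sleeps on PREEMPT_RT, so
 *					// its wait type is LD_WAIT_CONFIG;
 *					// LD_WAIT_CONFIG > LD_WAIT_SPIN
 *					// -> "[ BUG: Invalid wait context ]"
 *
 * The reverse order is fine: a stricter (smaller) wait type may always nest
 * inside a looser one.
 */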
4743
4744#else /* CONFIG_PROVE_LOCKING */
4745
4746static inline int
4747mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4748{
4749        return 1;
4750}
4751
4752static inline unsigned int task_irq_context(struct task_struct *task)
4753{
4754        return 0;
4755}
4756
4757static inline int separate_irq_context(struct task_struct *curr,
4758                struct held_lock *hlock)
4759{
4760        return 0;
4761}
4762
4763static inline int check_wait_context(struct task_struct *curr,
4764                                     struct held_lock *next)
4765{
4766        return 0;
4767}
4768
4769#endif /* CONFIG_PROVE_LOCKING */
4770
4771/*
4772 * Initialize a lock instance's lock-class mapping info:
4773 */
4774void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
4775                            struct lock_class_key *key, int subclass,
4776                            u8 inner, u8 outer, u8 lock_type)
4777{
4778        int i;
4779
4780        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
4781                lock->class_cache[i] = NULL;
4782
4783#ifdef CONFIG_LOCK_STAT
4784        lock->cpu = raw_smp_processor_id();
4785#endif
4786
4787        /*
4788         * Can't be having no nameless bastards around this place!
4789         */
4790        if (DEBUG_LOCKS_WARN_ON(!name)) {
4791                lock->name = "NULL";
4792                return;
4793        }
4794
4795        lock->name = name;
4796
4797        lock->wait_type_outer = outer;
4798        lock->wait_type_inner = inner;
4799        lock->lock_type = lock_type;
4800
4801        /*
4802         * No key, no joy, we need to hash something.
4803         */
4804        if (DEBUG_LOCKS_WARN_ON(!key))
4805                return;
4806        /*
4807         * Sanity check, the lock-class key must either have been allocated
4808         * statically or must have been registered as a dynamic key.
4809         */
4810        if (!static_obj(key) && !is_dynamic_key(key)) {
4811                if (debug_locks)
4812                        printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
4813                DEBUG_LOCKS_WARN_ON(1);
4814                return;
4815        }
4816        lock->key = key;
4817
4818        if (unlikely(!debug_locks))
4819                return;
4820
4821        if (subclass) {
4822                unsigned long flags;
4823
4824                if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
4825                        return;
4826
4827                raw_local_irq_save(flags);
4828                lockdep_recursion_inc();
4829                register_lock_class(lock, subclass, 1);
4830                lockdep_recursion_finish();
4831                raw_local_irq_restore(flags);
4832        }
4833}
4834EXPORT_SYMBOL_GPL(lockdep_init_map_type);
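
/*
 * Illustrative sketch of wiring up a map through this initializer; the my_*
 * names are hypothetical, the LD_WAIT_*/LD_LOCK_* values are the real
 * lockdep enums:
 *
 *	static struct lock_class_key my_key;
 *	struct lockdep_map my_map;
 *
 *	lockdep_init_map_type(&my_map, "my_lock", &my_key, 0,
 *			      LD_WAIT_SPIN,	// inner: behaves like a spinlock
 *			      LD_WAIT_INV,	// outer: defaults to inner
 *			      LD_LOCK_NORMAL);
 *
 * Wrappers such as lockdep_init_map_waits() simply call this with
 * LD_LOCK_NORMAL filled in for the lock_type argument.
 */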
4835
4836struct lock_class_key __lockdep_no_validate__;
4837EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
4838
4839static void
4840print_lock_nested_lock_not_held(struct task_struct *curr,
4841                                struct held_lock *hlock,
4842                                unsigned long ip)
4843{
4844        if (!debug_locks_off())
4845                return;
4846        if (debug_locks_silent)
4847                return;
4848
4849        pr_warn("\n");
4850        pr_warn("==================================\n");
4851        pr_warn("WARNING: Nested lock was not taken\n");
4852        print_kernel_ident();
4853        pr_warn("----------------------------------\n");
4854
4855        pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
4856        print_lock(hlock);
4857
4858        pr_warn("\nbut this task is not holding:\n");
4859        pr_warn("%s\n", hlock->nest_lock->name);
4860
4864        pr_warn("\nother info that might help us debug this:\n");
4865        lockdep_print_held_locks(curr);
4866
4867        pr_warn("\nstack backtrace:\n");
4868        dump_stack();
4869}
4870
4871static int __lock_is_held(const struct lockdep_map *lock, int read);
4872
4873/*
4874 * This gets called for every mutex_lock*()/spin_lock*() operation.
4875 * We maintain the dependency maps and validate the locking attempt:
4876 *
4877 * The callers must make sure that IRQs are disabled before calling it,
4878 * otherwise we could get an interrupt which would want to take locks,
4879 * which would end up in lockdep again.
4880 */
4881static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
4882                          int trylock, int read, int check, int hardirqs_off,
4883                          struct lockdep_map *nest_lock, unsigned long ip,
4884                          int references, int pin_count)
4885{
4886        struct task_struct *curr = current;
4887        struct lock_class *class = NULL;
4888        struct held_lock *hlock;
4889        unsigned int depth;
4890        int chain_head = 0;
4891        int class_idx;
4892        u64 chain_key;
4893
4894        if (unlikely(!debug_locks))
4895                return 0;
4896
4897        if (!prove_locking || lock->key == &__lockdep_no_validate__)
4898                check = 0;
4899
4900        if (subclass < NR_LOCKDEP_CACHING_CLASSES)
4901                class = lock->class_cache[subclass];
4902        /*
4903         * Not cached?
4904         */
4905        if (unlikely(!class)) {
4906                class = register_lock_class(lock, subclass, 0);
4907                if (!class)
4908                        return 0;
4909        }
4910
4911        debug_class_ops_inc(class);
4912
4913        if (very_verbose(class)) {
4914                printk("\nacquire class [%px] %s", class->key, class->name);
4915                if (class->name_version > 1)
4916                        printk(KERN_CONT "#%d", class->name_version);
4917                printk(KERN_CONT "\n");
4918                dump_stack();
4919        }
4920
4921        /*
4922         * Add the lock to the list of currently held locks.
4923         * (we don't increase the depth just yet, not until the
4924         * dependency checks are done)
4925         */
4926        depth = curr->lockdep_depth;
4927        /*
4928         * Ran out of static storage for our per-task lock stack again, have we?
4929         */
4930        if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
4931                return 0;
4932
4933        class_idx = class - lock_classes;
4934
4935        if (depth) { /* we're holding locks */
4936                hlock = curr->held_locks + depth - 1;
4937                if (hlock->class_idx == class_idx && nest_lock) {
4938                        if (!references)
4939                                references++;
4940
4941                        if (!hlock->references)
4942                                hlock->references++;
4943
4944                        hlock->references += references;
4945
4946                        /* Overflow */
4947                        if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
4948                                return 0;
4949
4950                        return 2;
4951                }
4952        }
4953
4954        hlock = curr->held_locks + depth;
4955        /*
4956         * Plain impossible, we just registered it and checked it weren't no
4957         * NULL like.. I bet this mushroom I ate was good!
4958         */
4959        if (DEBUG_LOCKS_WARN_ON(!class))
4960                return 0;
4961        hlock->class_idx = class_idx;
4962        hlock->acquire_ip = ip;
4963        hlock->instance = lock;
4964        hlock->nest_lock = nest_lock;
4965        hlock->irq_context = task_irq_context(curr);
4966        hlock->trylock = trylock;
4967        hlock->read = read;
4968        hlock->check = check;
4969        hlock->hardirqs_off = !!hardirqs_off;
4970        hlock->references = references;
4971#ifdef CONFIG_LOCK_STAT
4972        hlock->waittime_stamp = 0;
4973        hlock->holdtime_stamp = lockstat_clock();
4974#endif
4975        hlock->pin_count = pin_count;
4976
4977        if (check_wait_context(curr, hlock))
4978                return 0;
4979
4980        /* Initialize the lock usage bit */
4981        if (!mark_usage(curr, hlock, check))
4982                return 0;
4983
4984        /*
4985         * Calculate the chain hash: it's the combined hash of all the
4986         * lock keys along the dependency chain. We save the hash value
4987         * at every step so that we can get the current hash easily
4988         * after unlock. The chain hash is then used to cache dependency
4989         * results.
4990         *
4991         * The 'key ID' is the most compact key value we can use to drive
4992         * the hash, not class->key.
4993         */
4994        /*
4995         * Whoops, we did it again.. class_idx is invalid.
4996         */
4997        if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use)))
4998                return 0;
4999
5000        chain_key = curr->curr_chain_key;
5001        if (!depth) {
5002                /*
5003                 * How can we have a chain hash when we ain't got no keys?!
5004                 */
5005                if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
5006                        return 0;
5007                chain_head = 1;
5008        }
5009
5010        hlock->prev_chain_key = chain_key;
5011        if (separate_irq_context(curr, hlock)) {
5012                chain_key = INITIAL_CHAIN_KEY;
5013                chain_head = 1;
5014        }
5015        chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
5016
5017        if (nest_lock && !__lock_is_held(nest_lock, -1)) {
5018                print_lock_nested_lock_not_held(curr, hlock, ip);
5019                return 0;
5020        }
5021
5022        if (!debug_locks_silent) {
5023                WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
5024                WARN_ON_ONCE(!hlock_class(hlock)->key);
5025        }
5026
5027        if (!validate_chain(curr, hlock, chain_head, chain_key))
5028                return 0;
5029
5030        curr->curr_chain_key = chain_key;
5031        curr->lockdep_depth++;
5032        check_chain_key(curr);
5033#ifdef CONFIG_DEBUG_LOCKDEP
5034        if (unlikely(!debug_locks))
5035                return 0;
5036#endif
5037        if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
5038                debug_locks_off();
5039                print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
5040                printk(KERN_DEBUG "depth: %i  max: %lu!\n",
5041                       curr->lockdep_depth, MAX_LOCK_DEPTH);
5042
5043                lockdep_print_held_locks(current);
5044                debug_show_all_locks();
5045                dump_stack();
5046
5047                return 0;
5048        }
5049
5050        if (unlikely(curr->lockdep_depth > max_lockdep_depth))
5051                max_lockdep_depth = curr->lockdep_depth;
5052
5053        return 1;
5054}
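
/*
 * The 'return 2' merge path above is what makes nest_lock annotations
 * scale. Illustrative sketch (hypothetical my_* names): a caller taking
 * many locks of one class under a single outer lock,
 *
 *	mutex_lock(&my_outer);
 *	list_for_each_entry(obj, &my_list, node)
 *		spin_lock_nest_lock(&obj->lock, &my_outer);
 *
 * consumes only one held_lock entry for the whole class: repeat
 * acquisitions just bump hlock->references instead of growing the per-task
 * lock stack towards MAX_LOCK_DEPTH.
 */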
5055
5056static void print_unlock_imbalance_bug(struct task_struct *curr,
5057                                       struct lockdep_map *lock,
5058                                       unsigned long ip)
5059{
5060        if (!debug_locks_off())
5061                return;
5062        if (debug_locks_silent)
5063                return;
5064
5065        pr_warn("\n");
5066        pr_warn("=====================================\n");
5067        pr_warn("WARNING: bad unlock balance detected!\n");
5068        print_kernel_ident();
5069        pr_warn("-------------------------------------\n");
5070        pr_warn("%s/%d is trying to release lock (",
5071                curr->comm, task_pid_nr(curr));
5072        print_lockdep_cache(lock);
5073        pr_cont(") at:\n");
5074        print_ip_sym(KERN_WARNING, ip);
5075        pr_warn("but there are no more locks to release!\n");
5076        pr_warn("\nother info that might help us debug this:\n");
5077        lockdep_print_held_locks(curr);
5078
5079        pr_warn("\nstack backtrace:\n");
5080        dump_stack();
5081}
5082
5083static noinstr int match_held_lock(const struct held_lock *hlock,
5084                                   const struct lockdep_map *lock)
5085{
5086        if (hlock->instance == lock)
5087                return 1;
5088
5089        if (hlock->references) {
5090                const struct lock_class *class = lock->class_cache[0];
5091
5092                if (!class)
5093                        class = look_up_lock_class(lock, 0);
5094
5095                /*
5096                 * If look_up_lock_class() failed to find a class, we're trying
5097                 * to test if we hold a lock that has never yet been acquired.
5098                 * Clearly if the lock hasn't been acquired _ever_, we're not
5099                 * holding it either, so report failure.
5100                 */
5101                if (!class)
5102                        return 0;
5103
5104                /*
5105                 * References, but not a lock we're actually ref-counting?
5106                 * State got messed up, follow the sites that change ->references
5107                 * and try to make sense of it.
5108                 */
5109                if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
5110                        return 0;
5111
5112                if (hlock->class_idx == class - lock_classes)
5113                        return 1;
5114        }
5115
5116        return 0;
5117}
5118
5119/* @depth must not be zero */
5120static struct held_lock *find_held_lock(struct task_struct *curr,
5121                                        struct lockdep_map *lock,
5122                                        unsigned int depth, int *idx)
5123{
5124        struct held_lock *ret, *hlock, *prev_hlock;
5125        int i;
5126
5127        i = depth - 1;
5128        hlock = curr->held_locks + i;
5129        ret = hlock;
5130        if (match_held_lock(hlock, lock))
5131                goto out;
5132
5133        ret = NULL;
5134        for (i--, prev_hlock = hlock--;
5135             i >= 0;
5136             i--, prev_hlock = hlock--) {
5137                /*
5138                 * We must not cross into another context:
5139                 */
5140                if (prev_hlock->irq_context != hlock->irq_context) {
5141                        ret = NULL;
5142                        break;
5143                }
5144                if (match_held_lock(hlock, lock)) {
5145                        ret = hlock;
5146                        break;
5147                }
5148        }
5149
5150out:
5151        *idx = i;
5152        return ret;
5153}
5154
5155static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
5156                                int idx, unsigned int *merged)
5157{
5158        struct held_lock *hlock;
5159        int first_idx = idx;
5160
5161        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
5162                return 0;
5163
5164        for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
5165                switch (__lock_acquire(hlock->instance,
5166                                    hlock_class(hlock)->subclass,
5167                                    hlock->trylock,
5168                                    hlock->read, hlock->check,
5169                                    hlock->hardirqs_off,
5170                                    hlock->nest_lock, hlock->acquire_ip,
5171                                    hlock->references, hlock->pin_count)) {
5172                case 0:
5173                        return 1;
5174                case 1:
5175                        break;
5176                case 2:
5177                        *merged += (idx == first_idx);
5178                        break;
5179                default:
5180                        WARN_ON(1);
5181                        return 0;
5182                }
5183        }
5184        return 0;
5185}
5186
5187static int
5188__lock_set_class(struct lockdep_map *lock, const char *name,
5189                 struct lock_class_key *key, unsigned int subclass,
5190                 unsigned long ip)
5191{
5192        struct task_struct *curr = current;
5193        unsigned int depth, merged = 0;
5194        struct held_lock *hlock;
5195        struct lock_class *class;
5196        int i;
5197
5198        if (unlikely(!debug_locks))
5199                return 0;
5200
5201        depth = curr->lockdep_depth;
5202        /*
5203         * This function is about (re)setting the class of a held lock,
5204         * yet we're not actually holding any locks. Naughty user!
5205         */
5206        if (DEBUG_LOCKS_WARN_ON(!depth))
5207                return 0;
5208
5209        hlock = find_held_lock(curr, lock, depth, &i);
5210        if (!hlock) {
5211                print_unlock_imbalance_bug(curr, lock, ip);
5212                return 0;
5213        }
5214
5215        lockdep_init_map_waits(lock, name, key, 0,
5216                               lock->wait_type_inner,
5217                               lock->wait_type_outer);
5218        class = register_lock_class(lock, subclass, 0);
5219        hlock->class_idx = class - lock_classes;
5220
5221        curr->lockdep_depth = i;
5222        curr->curr_chain_key = hlock->prev_chain_key;
5223
5224        if (reacquire_held_locks(curr, depth, i, &merged))
5225                return 0;
5226
5227        /*
5228         * I took it apart and put it back together again, except now I have
5229         * these 'spare' parts.. where shall I put them.
5230         */
5231        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
5232                return 0;
5233        return 1;
5234}
5235
5236static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
5237{
5238        struct task_struct *curr = current;
5239        unsigned int depth, merged = 0;
5240        struct held_lock *hlock;
5241        int i;
5242
5243        if (unlikely(!debug_locks))
5244                return 0;
5245
5246        depth = curr->lockdep_depth;
5247        /*
5248         * This function is about (re)setting the class of a held lock,
5249         * yet we're not actually holding any locks. Naughty user!
5250         */
5251        if (DEBUG_LOCKS_WARN_ON(!depth))
5252                return 0;
5253
5254        hlock = find_held_lock(curr, lock, depth, &i);
5255        if (!hlock) {
5256                print_unlock_imbalance_bug(curr, lock, ip);
5257                return 0;
5258        }
5259
5260        curr->lockdep_depth = i;
5261        curr->curr_chain_key = hlock->prev_chain_key;
5262
5263        WARN(hlock->read, "downgrading a read lock");
5264        hlock->read = 1;
5265        hlock->acquire_ip = ip;
5266
5267        if (reacquire_held_locks(curr, depth, i, &merged))
5268                return 0;
5269
5270        /* Merging can't happen with unchanged classes.. */
5271        if (DEBUG_LOCKS_WARN_ON(merged))
5272                return 0;
5273
5274        /*
5275         * I took it apart and put it back together again, except now I have
5276         * these 'spare' parts.. where shall I put them.
5277         */
5278        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
5279                return 0;
5280
5281        return 1;
5282}
5283
5284/*
5285 * Remove the lock from the list of currently held locks - this gets
5286 * called on mutex_unlock()/spin_unlock*() (or on a failed
5287 * mutex_lock_interruptible()).
5288 */
5289static int
5290__lock_release(struct lockdep_map *lock, unsigned long ip)
5291{
5292        struct task_struct *curr = current;
5293        unsigned int depth, merged = 1;
5294        struct held_lock *hlock;
5295        int i;
5296
5297        if (unlikely(!debug_locks))
5298                return 0;
5299
5300        depth = curr->lockdep_depth;
5301        /*
5302         * So we're all set to release this lock.. wait what lock? We don't
5303         * own any locks, you've been drinking again?
5304         */
5305        if (depth <= 0) {
5306                print_unlock_imbalance_bug(curr, lock, ip);
5307                return 0;
5308        }
5309
5310        /*
5311         * Check whether the lock exists in the current stack
5312         * of held locks:
5313         */
5314        hlock = find_held_lock(curr, lock, depth, &i);
5315        if (!hlock) {
5316                print_unlock_imbalance_bug(curr, lock, ip);
5317                return 0;
5318        }
5319
5320        if (hlock->instance == lock)
5321                lock_release_holdtime(hlock);
5322
5323        WARN(hlock->pin_count, "releasing a pinned lock\n");
5324
5325        if (hlock->references) {
5326                hlock->references--;
5327                if (hlock->references) {
5328                        /*
5329                         * We had, and after removing one, still have
5330                         * references, the current lock stack is still
5331                         * valid. We're done!
5332                         */
5333                        return 1;
5334                }
5335        }
5336
5337        /*
5338         * We have the right lock to unlock, 'hlock' points to it.
5339         * Now we remove it from the stack, and add back the other
5340         * entries (if any), recalculating the hash along the way:
5341         */
5342
5343        curr->lockdep_depth = i;
5344        curr->curr_chain_key = hlock->prev_chain_key;
5345
5346        /*
5347         * The most likely case is when the unlock is on the innermost
5348         * lock. In this case, we are done!
5349         */
5350        if (i == depth-1)
5351                return 1;
5352
5353        if (reacquire_held_locks(curr, depth, i + 1, &merged))
5354                return 0;
5355
5356        /*
5357         * We had N bottles of beer on the wall, we drank one, but now
5358         * there's not N-1 bottles of beer left on the wall...
5359         * Pouring two of the bottles together is acceptable.
5360         */
5361        DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);
5362
5363        /*
5364         * Since reacquire_held_locks() would have called check_chain_key()
5365         * indirectly via __lock_acquire(), we don't need to do it again
5366         * on return.
5367         */
5368        return 0;
5369}
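
/*
 * Illustrative sketch (hypothetical locks): out-of-order release is legal,
 * and this is the path that handles it. Given:
 *
 *	mutex_lock(&my_a);
 *	mutex_lock(&my_b);
 *	mutex_unlock(&my_a);		// not the innermost lock
 *
 * the unlock of my_a unwinds the stack to its position, then
 * reacquire_held_locks() replays my_b on top of the shortened stack so the
 * chain key stays consistent.
 */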
5370
5371static __always_inline
5372int __lock_is_held(const struct lockdep_map *lock, int read)
5373{
5374        struct task_struct *curr = current;
5375        int i;
5376
5377        for (i = 0; i < curr->lockdep_depth; i++) {
5378                struct held_lock *hlock = curr->held_locks + i;
5379
5380                if (match_held_lock(hlock, lock)) {
5381                        if (read == -1 || !!hlock->read == read)
5382                                return LOCK_STATE_HELD;
5383
5384                        return LOCK_STATE_NOT_HELD;
5385                }
5386        }
5387
5388        return LOCK_STATE_NOT_HELD;
5389}
5390
5391static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
5392{
5393        struct pin_cookie cookie = NIL_COOKIE;
5394        struct task_struct *curr = current;
5395        int i;
5396
5397        if (unlikely(!debug_locks))
5398                return cookie;
5399
5400        for (i = 0; i < curr->lockdep_depth; i++) {
5401                struct held_lock *hlock = curr->held_locks + i;
5402
5403                if (match_held_lock(hlock, lock)) {
5404                        /*
5405                         * Grab 16 bits of randomness; this is sufficient to not
5406                         * be guessable and still allows some pin nesting in
5407                         * our u32 pin_count.
5408                         */
5409                        cookie.val = 1 + (prandom_u32() >> 16);
5410                        hlock->pin_count += cookie.val;
5411                        return cookie;
5412                }
5413        }
5414
5415        WARN(1, "pinning an unheld lock\n");
5416        return cookie;
5417}
5418
5419static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5420{
5421        struct task_struct *curr = current;
5422        int i;
5423
5424        if (unlikely(!debug_locks))
5425                return;
5426
5427        for (i = 0; i < curr->lockdep_depth; i++) {
5428                struct held_lock *hlock = curr->held_locks + i;
5429
5430                if (match_held_lock(hlock, lock)) {
5431                        hlock->pin_count += cookie.val;
5432                        return;
5433                }
5434        }
5435
5436        WARN(1, "pinning an unheld lock\n");
5437}
5438
5439static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5440{
5441        struct task_struct *curr = current;
5442        int i;
5443
5444        if (unlikely(!debug_locks))
5445                return;
5446
5447        for (i = 0; i < curr->lockdep_depth; i++) {
5448                struct held_lock *hlock = curr->held_locks + i;
5449
5450                if (match_held_lock(hlock, lock)) {
5451                        if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
5452                                return;
5453
5454                        hlock->pin_count -= cookie.val;
5455
5456                        if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
5457                                hlock->pin_count = 0;
5458
5459                        return;
5460                }
5461        }
5462
5463        WARN(1, "unpinning an unheld lock\n");
5464}
5465
5466/*
5467 * Check whether we follow the irq-flags state precisely:
5468 */
5469static noinstr void check_flags(unsigned long flags)
5470{
5471#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
5472        if (!debug_locks)
5473                return;
5474
5475        /* Get the warning out..  */
5476        instrumentation_begin();
5477
5478        if (irqs_disabled_flags(flags)) {
5479                if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
5480                        printk("possible reason: unannotated irqs-off.\n");
5481                }
5482        } else {
5483                if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
5484                        printk("possible reason: unannotated irqs-on.\n");
5485                }
5486        }
5487
5488#ifndef CONFIG_PREEMPT_RT
5489        /*
5490         * We don't accurately track softirq state in e.g.
5491         * hardirq contexts (such as on 4KSTACKS), so only
5492         * check if not in hardirq contexts:
5493         */
5494        if (!hardirq_count()) {
5495                if (softirq_count()) {
5496                        /* like the above, but with softirqs */
5497                        DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
5498                } else {
5499                        /* lick the above, does it taste good? */
5500                        DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
5501                }
5502        }
5503#endif
5504
5505        if (!debug_locks)
5506                print_irqtrace_events(current);
5507
5508        instrumentation_end();
5509#endif
5510}
5511
5512void lock_set_class(struct lockdep_map *lock, const char *name,
5513                    struct lock_class_key *key, unsigned int subclass,
5514                    unsigned long ip)
5515{
5516        unsigned long flags;
5517
5518        if (unlikely(!lockdep_enabled()))
5519                return;
5520
5521        raw_local_irq_save(flags);
5522        lockdep_recursion_inc();
5523        check_flags(flags);
5524        if (__lock_set_class(lock, name, key, subclass, ip))
5525                check_chain_key(current);
5526        lockdep_recursion_finish();
5527        raw_local_irq_restore(flags);
5528}
5529EXPORT_SYMBOL_GPL(lock_set_class);
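
/*
 * The usual consumer is the lock_set_subclass() wrapper, which re-keys an
 * already held lock. The scheduler, for example, has used it to move a
 * runqueue lock that was acquired with SINGLE_DEPTH_NESTING back to
 * subclass 0 once the second runqueue lock is dropped, roughly:
 *
 *	raw_spin_unlock(&busiest->lock);
 *	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 */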
5530
5531void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
5532{
5533        unsigned long flags;
5534
5535        if (unlikely(!lockdep_enabled()))
5536                return;
5537
5538        raw_local_irq_save(flags);
5539        lockdep_recursion_inc();
5540        check_flags(flags);
5541        if (__lock_downgrade(lock, ip))
5542                check_chain_key(current);
5543        lockdep_recursion_finish();
5544        raw_local_irq_restore(flags);
5545}
5546EXPORT_SYMBOL_GPL(lock_downgrade);
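
/*
 * The canonical caller is rwsem's downgrade_write(), which turns a held
 * write lock into a read lock without a release/re-acquire window.
 * Illustrative sketch (my_rwsem is a hypothetical rw_semaphore):
 *
 *	down_write(&my_rwsem);
 *	... exclusive work ...
 *	downgrade_write(&my_rwsem);	// -> lock_downgrade(&sem->dep_map, ...)
 *	... shared work ...
 *	up_read(&my_rwsem);
 */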
5547
5548/* NMI context !!! */
5549static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock, int subclass)
5550{
5551#ifdef CONFIG_PROVE_LOCKING
5552        struct lock_class *class = look_up_lock_class(lock, subclass);
5553        unsigned long mask = LOCKF_USED;
5554
5555        /* if it doesn't have a class (yet), it certainly hasn't been used yet */
5556        if (!class)
5557                return;
5558
5559        /*
5560         * READ locks only conflict with USED, such that if we only ever use
5561         * READ locks, there is no deadlock possible -- RCU.
5562         */
5563        if (!hlock->read)
5564                mask |= LOCKF_USED_READ;
5565
5566        if (!(class->usage_mask & mask))
5567                return;
5568
5569        hlock->class_idx = class - lock_classes;
5570
5571        print_usage_bug(current, hlock, LOCK_USED, LOCK_USAGE_STATES);
5572#endif
5573}
5574
5575static bool lockdep_nmi(void)
5576{
5577        if (raw_cpu_read(lockdep_recursion))
5578                return false;
5579
5580        if (!in_nmi())
5581                return false;
5582
5583        return true;
5584}
5585
5586/*
5587 * read_lock() is recursive if:
5588 * 1. We force lockdep to think this way in selftests, or
5589 * 2. The implementation is not a queued read/write lock, or
5590 * 3. The locker is in an in_interrupt() context.
5591 */
5592bool read_lock_is_recursive(void)
5593{
5594        return force_read_lock_recursive ||
5595               !IS_ENABLED(CONFIG_QUEUED_RWLOCKS) ||
5596               in_interrupt();
5597}
5598EXPORT_SYMBOL_GPL(read_lock_is_recursive);
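
/*
 * "Recursive" here means a nested read_lock() of the same lock cannot block
 * even with a writer pending. With queued rwlocks that only holds
 * in_interrupt(); illustrative sketch (my_rwlock is hypothetical):
 *
 *	read_lock(&my_rwlock);		// task context
 *	  <writer arrives and queues>
 *	read_lock(&my_rwlock);		// task context: queues behind the
 *					// writer, hence non-recursive
 *
 * The same nested read from an interrupt handler is granted immediately,
 * which is why in_interrupt() readers are modelled as recursive.
 */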
5599
5600/*
5601 * We are not always called with irqs disabled - do that here,
5602 * and also avoid lockdep recursion:
5603 */
5604void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
5605                          int trylock, int read, int check,
5606                          struct lockdep_map *nest_lock, unsigned long ip)
5607{
5608        unsigned long flags;
5609
5610        trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
5611
5612        if (!debug_locks)
5613                return;
5614
5615        if (unlikely(!lockdep_enabled())) {
5616                /* XXX allow trylock from NMI ?!? */
5617                if (lockdep_nmi() && !trylock) {
5618                        struct held_lock hlock;
5619
5620                        hlock.acquire_ip = ip;
5621                        hlock.instance = lock;
5622                        hlock.nest_lock = nest_lock;
5623                        hlock.irq_context = 2; // XXX
5624                        hlock.trylock = trylock;
5625                        hlock.read = read;
5626                        hlock.check = check;
5627                        hlock.hardirqs_off = true;
5628                        hlock.references = 0;
5629
5630                        verify_lock_unused(lock, &hlock, subclass);
5631                }
5632                return;
5633        }
5634
5635        raw_local_irq_save(flags);
5636        check_flags(flags);
5637
5638        lockdep_recursion_inc();
5639        __lock_acquire(lock, subclass, trylock, read, check,
5640                       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
5641        lockdep_recursion_finish();
5642        raw_local_irq_restore(flags);
5643}
5644EXPORT_SYMBOL_GPL(lock_acquire);
5645
5646void lock_release(struct lockdep_map *lock, unsigned long ip)
5647{
5648        unsigned long flags;
5649
5650        trace_lock_release(lock, ip);
5651
5652        if (unlikely(!lockdep_enabled()))
5653                return;
5654
5655        raw_local_irq_save(flags);
5656        check_flags(flags);
5657
5658        lockdep_recursion_inc();
5659        if (__lock_release(lock, ip))
5660                check_chain_key(current);
5661        lockdep_recursion_finish();
5662        raw_local_irq_restore(flags);
5663}
5664EXPORT_SYMBOL_GPL(lock_release);
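
/*
 * Illustrative sketch of how a locking primitive annotates itself with the
 * two entry points above; the my_* names are hypothetical, the argument
 * layout matches lock_acquire() as declared here (map, subclass, trylock,
 * read, check, nest_lock, ip):
 *
 *	static void my_lock(struct my_lock_type *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		do_the_actual_locking(l);	// hypothetical
 *	}
 *
 *	static void my_unlock(struct my_lock_type *l)
 *	{
 *		lock_release(&l->dep_map, _RET_IP_);
 *		do_the_actual_unlocking(l);	// hypothetical
 *	}
 *
 * The spin_acquire()/spin_release() style macros in <linux/lockdep.h>
 * expand to this pattern for the in-tree primitives.
 */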
5665
5666noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
5667{
5668        unsigned long flags;
5669        int ret = LOCK_STATE_NOT_HELD;
5670
5671        /*
5672         * Avoid false negatives from lockdep_assert_held() and
5673         * lockdep_assert_not_held().
5674         */
5675        if (unlikely(!lockdep_enabled()))
5676                return LOCK_STATE_UNKNOWN;
5677
5678        raw_local_irq_save(flags);
5679        check_flags(flags);
5680
5681        lockdep_recursion_inc();
5682        ret = __lock_is_held(lock, read);
5683        lockdep_recursion_finish();
5684        raw_local_irq_restore(flags);
5685
5686        return ret;
5687}
5688EXPORT_SYMBOL_GPL(lock_is_held_type);
5689NOKPROBE_SYMBOL(lock_is_held_type);
5690
5691struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
5692{
5693        struct pin_cookie cookie = NIL_COOKIE;
5694        unsigned long flags;
5695
5696        if (unlikely(!lockdep_enabled()))
5697                return cookie;
5698
5699        raw_local_irq_save(flags);
5700        check_flags(flags);
5701
5702        lockdep_recursion_inc();
5703        cookie = __lock_pin_lock(lock);
5704        lockdep_recursion_finish();
5705        raw_local_irq_restore(flags);
5706
5707        return cookie;
5708}
5709EXPORT_SYMBOL_GPL(lock_pin_lock);
5710
5711void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5712{
5713        unsigned long flags;
5714
5715        if (unlikely(!lockdep_enabled()))
5716                return;
5717
5718        raw_local_irq_save(flags);
5719        check_flags(flags);
5720
5721        lockdep_recursion_inc();
5722        __lock_repin_lock(lock, cookie);
5723        lockdep_recursion_finish();
5724        raw_local_irq_restore(flags);
5725}
5726EXPORT_SYMBOL_GPL(lock_repin_lock);
5727
5728void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5729{
5730        unsigned long flags;
5731
5732        if (unlikely(!lockdep_enabled()))
5733                return;
5734
5735        raw_local_irq_save(flags);
5736        check_flags(flags);
5737
5738        lockdep_recursion_inc();
5739        __lock_unpin_lock(lock, cookie);
5740        lockdep_recursion_finish();
5741        raw_local_irq_restore(flags);
5742}
5743EXPORT_SYMBOL_GPL(lock_unpin_lock);
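
/*
 * Pinning is how e.g. the scheduler asserts that a runqueue lock is not
 * dropped behind its back across functions that may legitimately release
 * and retake it. Illustrative sketch with a hypothetical lock:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&my_lock);
 *	...				// any unlock here trips the
 *					// "releasing a pinned lock" warning
 *	lockdep_unpin_lock(&my_lock, cookie);
 *
 * The randomized cookie makes it hard for unrelated code to unpin a lock
 * it did not pin itself.
 */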
5744
5745#ifdef CONFIG_LOCK_STAT
5746static void print_lock_contention_bug(struct task_struct *curr,
5747                                      struct lockdep_map *lock,
5748                                      unsigned long ip)
5749{
5750        if (!debug_locks_off())
5751                return;
5752        if (debug_locks_silent)
5753                return;
5754
5755        pr_warn("\n");
5756        pr_warn("=================================\n");
5757        pr_warn("WARNING: bad contention detected!\n");
5758        print_kernel_ident();
5759        pr_warn("---------------------------------\n");
5760        pr_warn("%s/%d is trying to contend lock (",
5761                curr->comm, task_pid_nr(curr));
5762        print_lockdep_cache(lock);
5763        pr_cont(") at:\n");
5764        print_ip_sym(KERN_WARNING, ip);
5765        pr_warn("but there are no locks held!\n");
5766        pr_warn("\nother info that might help us debug this:\n");
5767        lockdep_print_held_locks(curr);
5768
5769        pr_warn("\nstack backtrace:\n");
5770        dump_stack();
5771}
5772
5773static void
5774__lock_contended(struct lockdep_map *lock, unsigned long ip)
5775{
5776        struct task_struct *curr = current;
5777        struct held_lock *hlock;
5778        struct lock_class_stats *stats;
5779        unsigned int depth;
5780        int i, contention_point, contending_point;
5781
5782        depth = curr->lockdep_depth;
5783        /*
5784         * Whee, we contended on this lock, except it seems we're not
5785         * actually trying to acquire anything much at all..
5786         */
5787        if (DEBUG_LOCKS_WARN_ON(!depth))
5788                return;
5789
5790        hlock = find_held_lock(curr, lock, depth, &i);
5791        if (!hlock) {
5792                print_lock_contention_bug(curr, lock, ip);
5793                return;
5794        }
5795
5796        if (hlock->instance != lock)
5797                return;
5798
5799        hlock->waittime_stamp = lockstat_clock();
5800
5801        contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
5802        contending_point = lock_point(hlock_class(hlock)->contending_point,
5803                                      lock->ip);
5804
5805        stats = get_lock_stats(hlock_class(hlock));
5806        if (contention_point < LOCKSTAT_POINTS)
5807                stats->contention_point[contention_point]++;
5808        if (contending_point < LOCKSTAT_POINTS)
5809                stats->contending_point[contending_point]++;
5810        if (lock->cpu != smp_processor_id())
5811                stats->bounces[bounce_contended + !!hlock->read]++;
5812}
5813
5814static void
5815__lock_acquired(struct lockdep_map *lock, unsigned long ip)
5816{
5817        struct task_struct *curr = current;
5818        struct held_lock *hlock;
5819        struct lock_class_stats *stats;
5820        unsigned int depth;
5821        u64 now, waittime = 0;
5822        int i, cpu;
5823
5824        depth = curr->lockdep_depth;
5825        /*
5826         * Yay, we acquired ownership of this lock we didn't try to
5827         * acquire, how the heck did that happen?
5828         */
5829        if (DEBUG_LOCKS_WARN_ON(!depth))
5830                return;
5831
5832        hlock = find_held_lock(curr, lock, depth, &i);
5833        if (!hlock) {
5834                print_lock_contention_bug(curr, lock, _RET_IP_);
5835                return;
5836        }
5837
5838        if (hlock->instance != lock)
5839                return;
5840
5841        cpu = smp_processor_id();
5842        if (hlock->waittime_stamp) {
5843                now = lockstat_clock();
5844                waittime = now - hlock->waittime_stamp;
5845                hlock->holdtime_stamp = now;
5846        }
5847
5848        stats = get_lock_stats(hlock_class(hlock));
5849        if (waittime) {
5850                if (hlock->read)
5851                        lock_time_inc(&stats->read_waittime, waittime);
5852                else
5853                        lock_time_inc(&stats->write_waittime, waittime);
5854        }
5855        if (lock->cpu != cpu)
5856                stats->bounces[bounce_acquired + !!hlock->read]++;
5857
5858        lock->cpu = cpu;
5859        lock->ip = ip;
5860}
5861
5862void lock_contended(struct lockdep_map *lock, unsigned long ip)
5863{
5864        unsigned long flags;
5865
5866        trace_lock_contended(lock, ip);
5867
5868        if (unlikely(!lock_stat || !lockdep_enabled()))
5869                return;
5870
5871        raw_local_irq_save(flags);
5872        check_flags(flags);
5873        lockdep_recursion_inc();
5874        __lock_contended(lock, ip);
5875        lockdep_recursion_finish();
5876        raw_local_irq_restore(flags);
5877}
5878EXPORT_SYMBOL_GPL(lock_contended);
5879
5880void lock_acquired(struct lockdep_map *lock, unsigned long ip)
5881{
5882        unsigned long flags;
5883
5884        trace_lock_acquired(lock, ip);
5885
5886        if (unlikely(!lock_stat || !lockdep_enabled()))
5887                return;
5888
5889        raw_local_irq_save(flags);
5890        check_flags(flags);
5891        lockdep_recursion_inc();
5892        __lock_acquired(lock, ip);
5893        lockdep_recursion_finish();
5894        raw_local_irq_restore(flags);
5895}
5896EXPORT_SYMBOL_GPL(lock_acquired);
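
/*
 * These two hooks are normally driven by the LOCK_CONTENDED() helper in
 * <linux/lockdep.h>, which the sleeping-lock slowpaths use roughly as:
 *
 *	if (!try(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);	// start waittime
 *		lock(lock);					// block
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);	// end waittime
 *
 * so wait-time statistics are only charged when the fast path failed.
 */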
5897#endif
5898
5899/*
5900 * Used by the testsuite, sanitize the validator state
5901 * after a simulated failure:
5902 */
5903
5904void lockdep_reset(void)
5905{
5906        unsigned long flags;
5907        int i;
5908
5909        raw_local_irq_save(flags);
5910        lockdep_init_task(current);
5911        memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
5912        nr_hardirq_chains = 0;
5913        nr_softirq_chains = 0;
5914        nr_process_chains = 0;
5915        debug_locks = 1;
5916        for (i = 0; i < CHAINHASH_SIZE; i++)
5917                INIT_HLIST_HEAD(chainhash_table + i);
5918        raw_local_irq_restore(flags);
5919}
5920
5921/* Remove a class from a lock chain. Must be called with the graph lock held. */
5922static void remove_class_from_lock_chain(struct pending_free *pf,
5923                                         struct lock_chain *chain,
5924                                         struct lock_class *class)
5925{
5926#ifdef CONFIG_PROVE_LOCKING
5927        int i;
5928
5929        for (i = chain->base; i < chain->base + chain->depth; i++) {
5930                if (chain_hlock_class_idx(chain_hlocks[i]) != class - lock_classes)
5931                        continue;
5932                /*
5933                 * Each lock class occurs at most once in a lock chain, so once
5934                 * we find a match we can break out of this loop.
5935                 */
5936                goto free_lock_chain;
5937        }
5938        /* Since the chain has not been modified, return. */
5939        return;
5940
5941free_lock_chain:
5942        free_chain_hlocks(chain->base, chain->depth);
5943        /* Overwrite the chain key for concurrent RCU readers. */
5944        WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY);
5945        dec_chains(chain->irq_context);
5946
5947        /*
5948         * Note: calling hlist_del_rcu() from inside a
5949         * hlist_for_each_entry_rcu() loop is safe.
5950         */
5951        hlist_del_rcu(&chain->entry);
5952        __set_bit(chain - lock_chains, pf->lock_chains_being_freed);
5953        nr_zapped_lock_chains++;
5954#endif
5955}
5956
5957/* Must be called with the graph lock held. */
5958static void remove_class_from_lock_chains(struct pending_free *pf,
5959                                          struct lock_class *class)
5960{
5961        struct lock_chain *chain;
5962        struct hlist_head *head;
5963        int i;
5964
5965        for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
5966                head = chainhash_table + i;
5967                hlist_for_each_entry_rcu(chain, head, entry) {
5968                        remove_class_from_lock_chain(pf, chain, class);
5969                }
5970        }
5971}
5972
5973/*
5974 * Remove all references to a lock class. The caller must hold the graph lock.
5975 */
5976static void zap_class(struct pending_free *pf, struct lock_class *class)
5977{
5978        struct lock_list *entry;
5979        int i;
5980
5981        WARN_ON_ONCE(!class->key);
5982
5983        /*
5984         * Remove all dependencies this lock is
5985         * involved in:
5986         */
5987        for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
5988                entry = list_entries + i;
5989                if (entry->class != class && entry->links_to != class)
5990                        continue;
5991                __clear_bit(i, list_entries_in_use);
5992                nr_list_entries--;
5993                list_del_rcu(&entry->entry);
5994        }
5995        if (list_empty(&class->locks_after) &&
5996            list_empty(&class->locks_before)) {
5997                list_move_tail(&class->lock_entry, &pf->zapped);
5998                hlist_del_rcu(&class->hash_entry);
5999                WRITE_ONCE(class->key, NULL);
6000                WRITE_ONCE(class->name, NULL);
6001                nr_lock_classes--;
6002                __clear_bit(class - lock_classes, lock_classes_in_use);
6003        } else {
6004                WARN_ONCE(true, "%s() failed for class %s\n", __func__,
6005                          class->name);
6006        }
6007
6008        remove_class_from_lock_chains(pf, class);
6009        nr_zapped_classes++;
6010}
6011
6012static void reinit_class(struct lock_class *class)
6013{
6014        void *const p = class;
6015        const unsigned int offset = offsetof(struct lock_class, key);
6016
6017        WARN_ON_ONCE(!class->lock_entry.next);
6018        WARN_ON_ONCE(!list_empty(&class->locks_after));
6019        WARN_ON_ONCE(!list_empty(&class->locks_before));
6020        memset(p + offset, 0, sizeof(*class) - offset);
6021        WARN_ON_ONCE(!class->lock_entry.next);
6022        WARN_ON_ONCE(!list_empty(&class->locks_after));
6023        WARN_ON_ONCE(!list_empty(&class->locks_before));
6024}
6025
6026static inline int within(const void *addr, void *start, unsigned long size)
6027{
6028        return addr >= start && addr < start + size;
6029}
6030
6031static bool inside_selftest(void)
6032{
6033        return current == lockdep_selftest_task_struct;
6034}
6035
6036/* The caller must hold the graph lock. */
6037static struct pending_free *get_pending_free(void)
6038{
6039        return delayed_free.pf + delayed_free.index;
6040}
6041
6042static void free_zapped_rcu(struct rcu_head *cb);
6043
6044/*
6045 * Schedule an RCU callback if no RCU callback is pending. Must be called with
6046 * the graph lock held.
6047 */
6048static void call_rcu_zapped(struct pending_free *pf)
6049{
6050        WARN_ON_ONCE(inside_selftest());
6051
6052        if (list_empty(&pf->zapped))
6053                return;
6054
6055        if (delayed_free.scheduled)
6056                return;
6057
6058        delayed_free.scheduled = true;
6059
6060        WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
6061        delayed_free.index ^= 1;
6062
6063        call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
6064}
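
/*
 * A sketch of the flow, under a two-slot reading of delayed_free: the index
 * flips between two pending_free slots, so one slot can accumulate newly
 * zapped classes (open) while the other sits out an RCU grace period
 * (closed):
 *
 *	zap_class()		// accumulates in pf[0], the open slot
 *	call_rcu_zapped()	// index = 1; pf[0] now waits for RCU
 *	zap_class()		// meanwhile accumulates in pf[1]
 *	free_zapped_rcu()	// frees pf[0], re-arms for pf[1] if non-empty
 */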
6065
6066/* The caller must hold the graph lock. May be called from RCU context. */
6067static void __free_zapped_classes(struct pending_free *pf)
6068{
6069        struct lock_class *class;
6070
6071        check_data_structures();
6072
6073        list_for_each_entry(class, &pf->zapped, lock_entry)
6074                reinit_class(class);
6075
6076        list_splice_init(&pf->zapped, &free_lock_classes);
6077
6078#ifdef CONFIG_PROVE_LOCKING
6079        bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
6080                      pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
6081        bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
6082#endif
6083}
6084
6085static void free_zapped_rcu(struct rcu_head *ch)
6086{
6087        struct pending_free *pf;
6088        unsigned long flags;
6089
6090        if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
6091                return;
6092
6093        raw_local_irq_save(flags);
6094        lockdep_lock();
6095
6096        /* The closed head: its RCU grace period has elapsed, free it. */
6097        pf = delayed_free.pf + (delayed_free.index ^ 1);
6098        __free_zapped_classes(pf);
6099        delayed_free.scheduled = false;
6100
6101        /*
6102         * If there's anything on the open list, close and start a new callback.
6103         */
6104        call_rcu_zapped(delayed_free.pf + delayed_free.index);
6105
6106        lockdep_unlock();
6107        raw_local_irq_restore(flags);
6108}
6109
6110/*
6111 * Remove all lock classes from the class hash table and from the
6112 * all_lock_classes list whose key or name is in the address range [start,
6113 * start + size). Move these lock classes to the zapped_classes list. Must
6114 * be called with the graph lock held.
6115 */
6116static void __lockdep_free_key_range(struct pending_free *pf, void *start,
6117                                     unsigned long size)
6118{
6119        struct lock_class *class;
6120        struct hlist_head *head;
6121        int i;
6122
6123        /* Unhash all classes that were created by a module. */
6124        for (i = 0; i < CLASSHASH_SIZE; i++) {
6125                head = classhash_table + i;
6126                hlist_for_each_entry_rcu(class, head, hash_entry) {
6127                        if (!within(class->key, start, size) &&
6128                            !within(class->name, start, size))
6129                                continue;
6130                        zap_class(pf, class);
6131                }
6132        }
6133}
6134
6135/*
6136 * Used in module.c to remove lock classes from memory that is going to be
6137 * freed, and possibly re-used by other modules.
6138 *
6139 * We will have had one synchronize_rcu() before getting here, so we're
6140 * guaranteed nobody will look up these exact classes -- they're properly dead
6141 * but still allocated.
6142 */
6143static void lockdep_free_key_range_reg(void *start, unsigned long size)
6144{
6145        struct pending_free *pf;
6146        unsigned long flags;
6147
6148        init_data_structures_once();
6149
6150        raw_local_irq_save(flags);
6151        lockdep_lock();
6152        pf = get_pending_free();
6153        __lockdep_free_key_range(pf, start, size);
6154        call_rcu_zapped(pf);
6155        lockdep_unlock();
6156        raw_local_irq_restore(flags);
6157
6158        /*
6159         * Wait for any possible iterators from look_up_lock_class() to pass
6160         * before continuing to free the memory they refer to.
6161         */
6162        synchronize_rcu();
6163}
6164
6165/*
6166 * Free all lockdep keys in the range [start, start+size). Does not sleep.
6167 * Ignores debug_locks. Must only be used by the lockdep selftests.
6168 */
6169static void lockdep_free_key_range_imm(void *start, unsigned long size)
6170{
6171        struct pending_free *pf = delayed_free.pf;
6172        unsigned long flags;
6173
6174        init_data_structures_once();
6175
6176        raw_local_irq_save(flags);
6177        lockdep_lock();
6178        __lockdep_free_key_range(pf, start, size);
6179        __free_zapped_classes(pf);
6180        lockdep_unlock();
6181        raw_local_irq_restore(flags);
6182}
6183
6184void lockdep_free_key_range(void *start, unsigned long size)
6185{
6186        init_data_structures_once();
6187
6188        if (inside_selftest())
6189                lockdep_free_key_range_imm(start, size);
6190        else
6191                lockdep_free_key_range_reg(start, size);
6192}
6193
6194/*
6195 * Check whether any element of the @lock->class_cache[] array refers to a
6196 * registered lock class. The caller must hold either the graph lock or the
6197 * RCU read lock.
6198 */
6199static bool lock_class_cache_is_registered(struct lockdep_map *lock)
6200{
6201        struct lock_class *class;
6202        struct hlist_head *head;
6203        int i, j;
6204
6205        for (i = 0; i < CLASSHASH_SIZE; i++) {
6206                head = classhash_table + i;
6207                hlist_for_each_entry_rcu(class, head, hash_entry) {
6208                        for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
6209                                if (lock->class_cache[j] == class)
6210                                        return true;
6211                }
6212        }
6213        return false;
6214}
6215
6216/* The caller must hold the graph lock. Does not sleep. */
6217static void __lockdep_reset_lock(struct pending_free *pf,
6218                                 struct lockdep_map *lock)
6219{
6220        struct lock_class *class;
6221        int j;
6222
6223        /*
6224         * Remove all classes this lock might have:
6225         */
6226        for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
6227                /*
6228                 * If the class exists we look it up and zap it:
6229                 */
6230                class = look_up_lock_class(lock, j);
6231                if (class)
6232                        zap_class(pf, class);
6233        }
6234        /*
6235         * Debug check: in the end all mapped classes should
6236         * be gone.
6237         */
6238        if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
6239                debug_locks_off();
6240}
6241
6242/*
6243 * Remove all information lockdep has about a lock if debug_locks == 1. Free
6244 * released data structures from RCU context.
6245 */
6246static void lockdep_reset_lock_reg(struct lockdep_map *lock)
6247{
6248        struct pending_free *pf;
6249        unsigned long flags;
6250        int locked;
6251
6252        raw_local_irq_save(flags);
6253        locked = graph_lock();
6254        if (!locked)
6255                goto out_irq;
6256
6257        pf = get_pending_free();
6258        __lockdep_reset_lock(pf, lock);
6259        call_rcu_zapped(pf);
6260
6261        graph_unlock();
6262out_irq:
6263        raw_local_irq_restore(flags);
6264}
6265
6266/*
6267 * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
6268 * lockdep selftests.
6269 */
6270static void lockdep_reset_lock_imm(struct lockdep_map *lock)
6271{
6272        struct pending_free *pf = delayed_free.pf;
6273        unsigned long flags;
6274
6275        raw_local_irq_save(flags);
6276        lockdep_lock();
6277        __lockdep_reset_lock(pf, lock);
6278        __free_zapped_classes(pf);
6279        lockdep_unlock();
6280        raw_local_irq_restore(flags);
6281}
6282
6283void lockdep_reset_lock(struct lockdep_map *lock)
6284{
6285        init_data_structures_once();
6286
6287        if (inside_selftest())
6288                lockdep_reset_lock_imm(lock);
6289        else
6290                lockdep_reset_lock_reg(lock);
6291}
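/*
 * Example (hedged sketch): code that recycles the storage of a lock for an
 * unrelated purpose can make lockdep forget the old classes first, so that
 * stale class_cache[] entries cannot alias the new user. "obj" is a
 * hypothetical object embedding a spinlock (the dep_map member exists under
 * CONFIG_DEBUG_LOCK_ALLOC):
 *
 *        lockdep_reset_lock(&obj->lock.dep_map);
 *        spin_lock_init(&obj->lock);
 *
 * The first acquisition after spin_lock_init() registers a fresh class.
 */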
6292
6293/* Unregister a dynamically allocated key. */
6294void lockdep_unregister_key(struct lock_class_key *key)
6295{
6296        struct hlist_head *hash_head = keyhashentry(key);
6297        struct lock_class_key *k;
6298        struct pending_free *pf;
6299        unsigned long flags;
6300        bool found = false;
6301
6302        might_sleep();
6303
6304        if (WARN_ON_ONCE(static_obj(key)))
6305                return;
6306
6307        raw_local_irq_save(flags);
6308        if (!graph_lock())
6309                goto out_irq;
6310
6311        pf = get_pending_free();
6312        hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
6313                if (k == key) {
6314                        hlist_del_rcu(&k->hash_entry);
6315                        found = true;
6316                        break;
6317                }
6318        }
6319        WARN_ON_ONCE(!found);
6320        __lockdep_free_key_range(pf, key, 1);
6321        call_rcu_zapped(pf);
6322        graph_unlock();
6323out_irq:
6324        raw_local_irq_restore(flags);
6325
6326        /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
6327        synchronize_rcu();
6328}
6329EXPORT_SYMBOL_GPL(lockdep_unregister_key);
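/*
 * Example (sketch of the intended pairing): a dynamically allocated object
 * cannot use a static lock_class_key, so it registers one at runtime and
 * must unregister it, from sleepable context, before freeing the memory the
 * key lives in. "struct foo" and its helpers are hypothetical:
 *
 *        struct foo {
 *                spinlock_t lock;
 *                struct lock_class_key key;
 *        };
 *
 *        struct foo *foo_create(void)
 *        {
 *                struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *                if (!f)
 *                        return NULL;
 *                lockdep_register_key(&f->key);
 *                spin_lock_init(&f->lock);
 *                lockdep_set_class(&f->lock, &f->key);
 *                return f;
 *        }
 *
 *        void foo_destroy(struct foo *f)
 *        {
 *                lockdep_unregister_key(&f->key);
 *                kfree(f);
 *        }
 *
 * lockdep_unregister_key() may sleep (synchronize_rcu() above), hence the
 * might_sleep() check at the top of the function.
 */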
6330
6331void __init lockdep_init(void)
6332{
6333        printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
6334
6335        printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
6336        printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
6337        printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
6338        printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
6339        printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
6340        printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
6341        printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
6342
6343        printk(" memory used by lock dependency info: %zu kB\n",
6344               (sizeof(lock_classes) +
6345                sizeof(lock_classes_in_use) +
6346                sizeof(classhash_table) +
6347                sizeof(list_entries) +
6348                sizeof(list_entries_in_use) +
6349                sizeof(chainhash_table) +
6350                sizeof(delayed_free)
6351#ifdef CONFIG_PROVE_LOCKING
6352                + sizeof(lock_cq)
6353                + sizeof(lock_chains)
6354                + sizeof(lock_chains_in_use)
6355                + sizeof(chain_hlocks)
6356#endif
6357                ) / 1024
6358                );
6359
6360#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
6361        printk(" memory used for stack traces: %zu kB\n",
6362               (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024
6363               );
6364#endif
6365
6366        printk(" per task-struct memory footprint: %zu bytes\n",
6367               sizeof(((struct task_struct *)NULL)->held_locks));
6368}
6369
6370static void
6371print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
6372                     const void *mem_to, struct held_lock *hlock)
6373{
6374        if (!debug_locks_off())
6375                return;
6376        if (debug_locks_silent)
6377                return;
6378
6379        pr_warn("\n");
6380        pr_warn("=========================\n");
6381        pr_warn("WARNING: held lock freed!\n");
6382        print_kernel_ident();
6383        pr_warn("-------------------------\n");
6384        pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
6385                curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
6386        print_lock(hlock);
6387        lockdep_print_held_locks(curr);
6388
6389        pr_warn("\nstack backtrace:\n");
6390        dump_stack();
6391}
6392
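/*
 * Return true iff the byte ranges [mem_from, mem_from + mem_len) and
 * [lock_from, lock_from + lock_len) do not overlap.
 */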
6393static inline int not_in_range(const void *mem_from, unsigned long mem_len,
6394                                const void *lock_from, unsigned long lock_len)
6395{
6396        return lock_from + lock_len <= mem_from ||
6397                mem_from + mem_len <= lock_from;
6398}
6399
6400/*
6401 * Called when kernel memory is freed (or unmapped), or if a lock
6402 * is destroyed or reinitialized - this code checks whether there is
6403 * any held lock in the memory range [mem_from, mem_from + mem_len):
6404 */
6405void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
6406{
6407        struct task_struct *curr = current;
6408        struct held_lock *hlock;
6409        unsigned long flags;
6410        int i;
6411
6412        if (unlikely(!debug_locks))
6413                return;
6414
6415        raw_local_irq_save(flags);
6416        for (i = 0; i < curr->lockdep_depth; i++) {
6417                hlock = curr->held_locks + i;
6418
6419                if (not_in_range(mem_from, mem_len, hlock->instance,
6420                                        sizeof(*hlock->instance)))
6421                        continue;
6422
6423                print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
6424                break;
6425        }
6426        raw_local_irq_restore(flags);
6427}
6428EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
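/*
 * Example (illustrative sketch): a memory-freeing path would run this check
 * right before handing an object back to the allocator, so that freeing
 * memory that still embeds a held lock is caught immediately. "obj" and
 * "obj_size" are hypothetical:
 *
 *        debug_check_no_locks_freed(obj, obj_size);
 *        kfree(obj);
 */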
6429
6430static void print_held_locks_bug(void)
6431{
6432        if (!debug_locks_off())
6433                return;
6434        if (debug_locks_silent)
6435                return;
6436
6437        pr_warn("\n");
6438        pr_warn("====================================\n");
6439        pr_warn("WARNING: %s/%d still has locks held!\n",
6440               current->comm, task_pid_nr(current));
6441        print_kernel_ident();
6442        pr_warn("------------------------------------\n");
6443        lockdep_print_held_locks(current);
6444        pr_warn("\nstack backtrace:\n");
6445        dump_stack();
6446}
6447
6448void debug_check_no_locks_held(void)
6449{
6450        if (unlikely(current->lockdep_depth > 0))
6451                print_held_locks_bug();
6452}
6453EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
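/*
 * Example (illustrative sketch): at a point after which the current task
 * must not hold any lock, such as the final stages of task exit, a single
 * call asserts that invariant:
 *
 *        debug_check_no_locks_held();
 *
 * A non-zero lockdep_depth here produces the "still has locks held!" report
 * above.
 */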
6454
6455#ifdef __KERNEL__
6456void debug_show_all_locks(void)
6457{
6458        struct task_struct *g, *p;
6459
6460        if (unlikely(!debug_locks)) {
6461                pr_warn("INFO: lockdep is turned off.\n");
6462                return;
6463        }
6464        pr_warn("\nShowing all locks held in the system:\n");
6465
6466        rcu_read_lock();
6467        for_each_process_thread(g, p) {
6468                if (!p->lockdep_depth)
6469                        continue;
6470                lockdep_print_held_locks(p);
6471                touch_nmi_watchdog();
6472                touch_all_softlockup_watchdogs();
6473        }
6474        rcu_read_unlock();
6475
6476        pr_warn("\n");
6477        pr_warn("=============================================\n\n");
6478}
6479EXPORT_SYMBOL_GPL(debug_show_all_locks);
6480#endif
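/*
 * Example (illustrative sketch): a debugging hook, e.g. a sysrq handler or a
 * stall report, can dump the lock state of every task in one call. The
 * helper name is hypothetical:
 *
 *        static void dump_lock_state(void)
 *        {
 *                debug_show_all_locks();
 *        }
 */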
6481
6482/*
6483 * Careful: only use this function if you are sure that
6484 * the task cannot run in parallel!
6485 */
6486void debug_show_held_locks(struct task_struct *task)
6487{
6488        if (unlikely(!debug_locks)) {
6489                printk("INFO: lockdep is turned off.\n");
6490                return;
6491        }
6492        lockdep_print_held_locks(task);
6493}
6494EXPORT_SYMBOL_GPL(debug_show_held_locks);
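/*
 * Example (illustrative sketch): a watchdog that has found a task "t" to be
 * blocked, and can therefore guarantee it is not running concurrently, may
 * print that task's locks as part of its report. The predicate below is a
 * hypothetical stand-in for a real "cannot run in parallel" guarantee:
 *
 *        if (task_is_definitely_blocked(t))
 *                debug_show_held_locks(t);
 */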
6495
6496asmlinkage __visible void lockdep_sys_exit(void)
6497{
6498        struct task_struct *curr = current;
6499
6500        if (unlikely(curr->lockdep_depth)) {
6501                if (!debug_locks_off())
6502                        return;
6503                pr_warn("\n");
6504                pr_warn("================================================\n");
6505                pr_warn("WARNING: lock held when returning to user space!\n");
6506                print_kernel_ident();
6507                pr_warn("------------------------------------------------\n");
6508                pr_warn("%s/%d is leaving the kernel with locks still held!\n",
6509                                curr->comm, curr->pid);
6510                lockdep_print_held_locks(curr);
6511        }
6512
6513        /*
6514         * The lock history for each syscall should be independent. So wipe the
6515         * slate clean on return to userspace.
6516         */
6517        lockdep_invariant_state(false);
6518}
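/*
 * Example (illustrative sketch): architecture or generic entry code is
 * expected to invoke this hook as late as possible on the way back to user
 * space. Both helper names below are hypothetical:
 *
 *        static void exit_to_user(struct pt_regs *regs)
 *        {
 *                lockdep_sys_exit();
 *                arch_return_to_user(regs);
 *        }
 */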
6519
6520void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
6521{
6522        struct task_struct *curr = current;
6523        int dl = READ_ONCE(debug_locks);
6524
6525        /* Note: the following can be executed concurrently, so be careful. */
6526        pr_warn("\n");
6527        pr_warn("=============================\n");
6528        pr_warn("WARNING: suspicious RCU usage\n");
6529        print_kernel_ident();
6530        pr_warn("-----------------------------\n");
6531        pr_warn("%s:%d %s!\n", file, line, s);
6532        pr_warn("\nother info that might help us debug this:\n\n");
6533        pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n%s",
6534               !rcu_lockdep_current_cpu_online()
6535                        ? "RCU used illegally from offline CPU!\n"
6536                        : "",
6537               rcu_scheduler_active, dl,
6538               dl ? "" : "Possible false positive due to lockdep disabling via debug_locks = 0\n");
6539
6540        /*
6541         * If a CPU is in the RCU-free window in idle (i.e. in the section
6542         * between rcu_idle_enter() and rcu_idle_exit()), then RCU
6543         * considers that CPU to be in an "extended quiescent state",
6544         * which means that RCU will be completely ignoring that CPU.
6545         * Therefore, rcu_read_lock() and friends have absolutely no
6546         * effect on a CPU running in that state. In other words, even if
6547         * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
6548         * delete data structures out from under it.  RCU really has no
6549         * choice here: we need to keep an RCU-free window in idle where
6550         * the CPU may possibly enter into low power mode. This way, CPUs
6551         * that started a grace period can observe the extended quiescent
6552         * state; otherwise we would delay every grace period for as long
6553         * as we run in the idle task.
6554         *
6555         * So complain bitterly if someone does call rcu_read_lock(),
6556         * rcu_read_lock_bh() and so on from extended quiescent states.
6557         */
6558        if (!rcu_is_watching())
6559                pr_warn("RCU used illegally from extended quiescent state!\n");
6560
6561        lockdep_print_held_locks(curr);
6562        pr_warn("\nstack backtrace:\n");
6563        dump_stack();
6564}
6565EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
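/*
 * Example (illustrative sketch): the usual route into this function is
 * RCU_LOCKDEP_WARN() in <linux/rcupdate.h>, e.g. via an rcu_dereference()
 * performed outside any RCU read-side critical section. "gp" is a
 * hypothetical RCU-protected pointer:
 *
 *        p = rcu_dereference(gp);
 *
 * Without rcu_read_lock() held (and no other reason the access is safe),
 * the check in rcu_dereference_check() fires and the splat above is
 * printed. The fix is to bracket the access:
 *
 *        rcu_read_lock();
 *        p = rcu_dereference(gp);
 *        ...
 *        rcu_read_unlock();
 */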
6566