linux/kernel/lockdep.c
   1/*
   2 * kernel/lockdep.c
   3 *
   4 * Runtime locking correctness validator
   5 *
   6 * Started by Ingo Molnar:
   7 *
   8 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   9 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  10 *
  11 * this code maps all the lock dependencies as they occur in a live kernel
  12 * and will warn about the following classes of locking bugs:
  13 *
  14 * - lock inversion scenarios
  15 * - circular lock dependencies
  16 * - hardirq/softirq safe/unsafe locking bugs
  17 *
  18 * Bugs are reported even if the current locking scenario does not cause
  19 * any deadlock at this point.
  20 *
   21 * I.e. if at any time in the past two locks were taken in a different order,
   22 * even if that happened in another task, and even if those were different
   23 * locks (but of the same class as this lock), this code will detect it.
  24 *
  25 * Thanks to Arjan van de Ven for coming up with the initial idea of
   26 * mapping lock dependencies at runtime.
  27 */
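/*
 * A hedged, hypothetical illustration of the above: lock_a and lock_b are
 * made-up locks, and the two paths below acquire them in opposite order.
 * Even if the two paths never actually race at runtime, the validator
 * records the lock_a -> lock_b and lock_b -> lock_a dependencies and
 * reports a possible circular locking dependency.
 */
#if 0
static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void path_one(void)
{
	spin_lock(&lock_a);
	spin_lock(&lock_b);	/* records lock_a -> lock_b */
	spin_unlock(&lock_b);
	spin_unlock(&lock_a);
}

static void path_two(void)
{
	spin_lock(&lock_b);
	spin_lock(&lock_a);	/* lock_b -> lock_a closes the cycle: reported */
	spin_unlock(&lock_a);
	spin_unlock(&lock_b);
}
#endif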
  28#include <linux/mutex.h>
  29#include <linux/sched.h>
  30#include <linux/delay.h>
  31#include <linux/module.h>
  32#include <linux/proc_fs.h>
  33#include <linux/seq_file.h>
  34#include <linux/spinlock.h>
  35#include <linux/kallsyms.h>
  36#include <linux/interrupt.h>
  37#include <linux/stacktrace.h>
  38#include <linux/debug_locks.h>
  39#include <linux/irqflags.h>
  40#include <linux/utsname.h>
  41#include <linux/hash.h>
  42
  43#include <asm/sections.h>
  44
  45#include "lockdep_internals.h"
  46
  47#ifdef CONFIG_PROVE_LOCKING
  48int prove_locking = 1;
  49module_param(prove_locking, int, 0644);
  50#else
  51#define prove_locking 0
  52#endif
  53
  54#ifdef CONFIG_LOCK_STAT
  55int lock_stat = 1;
  56module_param(lock_stat, int, 0644);
  57#else
  58#define lock_stat 0
  59#endif
  60
  61/*
  62 * lockdep_lock: protects the lockdep graph, the hashes and the
  63 *               class/list/hash allocators.
  64 *
  65 * This is one of the rare exceptions where it's justified
   66 * to use a raw spinlock - we really don't want the spinlock
  67 * code to recurse back into the lockdep code...
  68 */
  69static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
  70
  71static int graph_lock(void)
  72{
  73        __raw_spin_lock(&lockdep_lock);
  74        /*
  75         * Make sure that if another CPU detected a bug while
   76         * walking the graph we don't change it (while the other
  77         * CPU is busy printing out stuff with the graph lock
  78         * dropped already)
  79         */
  80        if (!debug_locks) {
  81                __raw_spin_unlock(&lockdep_lock);
  82                return 0;
  83        }
  84        return 1;
  85}
  86
  87static inline int graph_unlock(void)
  88{
  89        if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
  90                return DEBUG_LOCKS_WARN_ON(1);
  91
  92        __raw_spin_unlock(&lockdep_lock);
  93        return 0;
  94}
  95
  96/*
  97 * Turn lock debugging off and return with 0 if it was off already,
  98 * and also release the graph lock:
  99 */
 100static inline int debug_locks_off_graph_unlock(void)
 101{
 102        int ret = debug_locks_off();
 103
 104        __raw_spin_unlock(&lockdep_lock);
 105
 106        return ret;
 107}
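/*
 * Sketch (not a new API, just the idiom used further down, e.g. in
 * register_lock_class()) of how the helpers above are meant to be used:
 * interrupts are disabled around the graph lock, and a zero return from
 * graph_lock() means lockdep has already turned itself off:
 */
#if 0
	unsigned long flags;

	raw_local_irq_save(flags);
	if (!graph_lock()) {
		raw_local_irq_restore(flags);
		return NULL;
	}
	/* ... modify the dependency graph / the hashes ... */
	graph_unlock();
	raw_local_irq_restore(flags);
#endif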
 108
 109static int lockdep_initialized;
 110
 111unsigned long nr_list_entries;
 112static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 113
 114/*
 115 * All data structures here are protected by the global debug_lock.
 116 *
  117 * Mutex key structs only get allocated once, during bootup, and never
 118 * get freed - this significantly simplifies the debugging code.
 119 */
 120unsigned long nr_lock_classes;
 121static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 122
 123#ifdef CONFIG_LOCK_STAT
 124static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 125
 126static int lock_contention_point(struct lock_class *class, unsigned long ip)
 127{
 128        int i;
 129
 130        for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
 131                if (class->contention_point[i] == 0) {
 132                        class->contention_point[i] = ip;
 133                        break;
 134                }
 135                if (class->contention_point[i] == ip)
 136                        break;
 137        }
 138
 139        return i;
 140}
 141
 142static void lock_time_inc(struct lock_time *lt, s64 time)
 143{
 144        if (time > lt->max)
 145                lt->max = time;
 146
 147        if (time < lt->min || !lt->min)
 148                lt->min = time;
 149
 150        lt->total += time;
 151        lt->nr++;
 152}
 153
 154static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 155{
 156        dst->min += src->min;
 157        dst->max += src->max;
 158        dst->total += src->total;
 159        dst->nr += src->nr;
 160}
 161
 162struct lock_class_stats lock_stats(struct lock_class *class)
 163{
 164        struct lock_class_stats stats;
 165        int cpu, i;
 166
 167        memset(&stats, 0, sizeof(struct lock_class_stats));
 168        for_each_possible_cpu(cpu) {
 169                struct lock_class_stats *pcs =
 170                        &per_cpu(lock_stats, cpu)[class - lock_classes];
 171
 172                for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
 173                        stats.contention_point[i] += pcs->contention_point[i];
 174
 175                lock_time_add(&pcs->read_waittime, &stats.read_waittime);
 176                lock_time_add(&pcs->write_waittime, &stats.write_waittime);
 177
 178                lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
 179                lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
 180
 181                for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
 182                        stats.bounces[i] += pcs->bounces[i];
 183        }
 184
 185        return stats;
 186}
 187
 188void clear_lock_stats(struct lock_class *class)
 189{
 190        int cpu;
 191
 192        for_each_possible_cpu(cpu) {
 193                struct lock_class_stats *cpu_stats =
 194                        &per_cpu(lock_stats, cpu)[class - lock_classes];
 195
 196                memset(cpu_stats, 0, sizeof(struct lock_class_stats));
 197        }
 198        memset(class->contention_point, 0, sizeof(class->contention_point));
 199}
 200
 201static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 202{
 203        return &get_cpu_var(lock_stats)[class - lock_classes];
 204}
 205
 206static void put_lock_stats(struct lock_class_stats *stats)
 207{
 208        put_cpu_var(lock_stats);
 209}
 210
 211static void lock_release_holdtime(struct held_lock *hlock)
 212{
 213        struct lock_class_stats *stats;
 214        s64 holdtime;
 215
 216        if (!lock_stat)
 217                return;
 218
 219        holdtime = sched_clock() - hlock->holdtime_stamp;
 220
 221        stats = get_lock_stats(hlock->class);
 222        if (hlock->read)
 223                lock_time_inc(&stats->read_holdtime, holdtime);
 224        else
 225                lock_time_inc(&stats->write_holdtime, holdtime);
 226        put_lock_stats(stats);
 227}
 228#else
 229static inline void lock_release_holdtime(struct held_lock *hlock)
 230{
 231}
 232#endif
 233
 234/*
 235 * We keep a global list of all lock classes. The list only grows,
 236 * never shrinks. The list is only accessed with the lockdep
  237 * spinlock held.
 238 */
 239LIST_HEAD(all_lock_classes);
 240
 241/*
 242 * The lockdep classes are in a hash-table as well, for fast lookup:
 243 */
 244#define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
 245#define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
 246#define __classhashfn(key)      hash_long((unsigned long)key, CLASSHASH_BITS)
 247#define classhashentry(key)     (classhash_table + __classhashfn((key)))
 248
 249static struct list_head classhash_table[CLASSHASH_SIZE];
 250
 251/*
 252 * We put the lock dependency chains into a hash-table as well, to cache
 253 * their existence:
 254 */
 255#define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS-1)
 256#define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
 257#define __chainhashfn(chain)    hash_long(chain, CHAINHASH_BITS)
 258#define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))
 259
 260static struct list_head chainhash_table[CHAINHASH_SIZE];
 261
 262/*
 263 * The hash key of the lock dependency chains is a hash itself too:
 264 * it's a hash of all locks taken up to that lock, including that lock.
 265 * It's a 64-bit hash, because it's important for the keys to be
 266 * unique.
 267 */
 268#define iterate_chain_key(key1, key2) \
 269        (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
 270        ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
 271        (key2))
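/*
 * Rough sketch of how the chain key is built up as locks are acquired
 * (__lock_acquire() does this for real, and check_chain_key() below
 * re-derives it for debugging): starting from 0, the 64-bit key is folded
 * with the class index of every held lock, in order:
 */
#if 0
	u64 chain_key = 0;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		unsigned int id = curr->held_locks[i].class - lock_classes;

		chain_key = iterate_chain_key(chain_key, id);
	}
	/* chain_key now identifies this exact sequence of lock classes */
#endif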
 272
 273void lockdep_off(void)
 274{
 275        current->lockdep_recursion++;
 276}
 277
 278EXPORT_SYMBOL(lockdep_off);
 279
 280void lockdep_on(void)
 281{
 282        current->lockdep_recursion--;
 283}
 284
 285EXPORT_SYMBOL(lockdep_on);
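/*
 * Usage sketch: lockdep_off()/lockdep_on() bump a per-task recursion
 * counter, so everything the current task does in between is invisible
 * to the validator. The calls nest; the helper called in the middle is
 * purely hypothetical:
 */
#if 0
	lockdep_off();
	do_something_lockdep_cannot_follow();	/* hypothetical */
	lockdep_on();
#endif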
 286
 287/*
 288 * Debugging switches:
 289 */
 290
 291#define VERBOSE                 0
 292#define VERY_VERBOSE            0
 293
 294#if VERBOSE
 295# define HARDIRQ_VERBOSE        1
 296# define SOFTIRQ_VERBOSE        1
 297#else
 298# define HARDIRQ_VERBOSE        0
 299# define SOFTIRQ_VERBOSE        0
 300#endif
 301
 302#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
 303/*
 304 * Quick filtering for interesting events:
 305 */
 306static int class_filter(struct lock_class *class)
 307{
 308#if 0
 309        /* Example */
 310        if (class->name_version == 1 &&
 311                        !strcmp(class->name, "lockname"))
 312                return 1;
 313        if (class->name_version == 1 &&
 314                        !strcmp(class->name, "&struct->lockfield"))
 315                return 1;
 316#endif
  317        /* Filter everything else. Returning 1 here would allow everything else */
 318        return 0;
 319}
 320#endif
 321
 322static int verbose(struct lock_class *class)
 323{
 324#if VERBOSE
 325        return class_filter(class);
 326#endif
 327        return 0;
 328}
 329
 330/*
 331 * Stack-trace: tightly packed array of stack backtrace
 332 * addresses. Protected by the graph_lock.
 333 */
 334unsigned long nr_stack_trace_entries;
 335static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
 336
 337static int save_trace(struct stack_trace *trace)
 338{
 339        trace->nr_entries = 0;
 340        trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
 341        trace->entries = stack_trace + nr_stack_trace_entries;
 342
 343        trace->skip = 3;
 344
 345        save_stack_trace(trace);
 346
 347        trace->max_entries = trace->nr_entries;
 348
 349        nr_stack_trace_entries += trace->nr_entries;
 350
 351        if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
 352                if (!debug_locks_off_graph_unlock())
 353                        return 0;
 354
 355                printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
 356                printk("turning off the locking correctness validator.\n");
 357                dump_stack();
 358
 359                return 0;
 360        }
 361
 362        return 1;
 363}
 364
 365unsigned int nr_hardirq_chains;
 366unsigned int nr_softirq_chains;
 367unsigned int nr_process_chains;
 368unsigned int max_lockdep_depth;
 369unsigned int max_recursion_depth;
 370
 371#ifdef CONFIG_DEBUG_LOCKDEP
 372/*
  373 * We cannot printk in early bootup code. Even early_printk()
  374 * might not work yet. So we mark any initialization errors and printk
  375 * about them later on, in lockdep_info().
 376 */
 377static int lockdep_init_error;
 378static unsigned long lockdep_init_trace_data[20];
 379static struct stack_trace lockdep_init_trace = {
 380        .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
 381        .entries = lockdep_init_trace_data,
 382};
 383
 384/*
 385 * Various lockdep statistics:
 386 */
 387atomic_t chain_lookup_hits;
 388atomic_t chain_lookup_misses;
 389atomic_t hardirqs_on_events;
 390atomic_t hardirqs_off_events;
 391atomic_t redundant_hardirqs_on;
 392atomic_t redundant_hardirqs_off;
 393atomic_t softirqs_on_events;
 394atomic_t softirqs_off_events;
 395atomic_t redundant_softirqs_on;
 396atomic_t redundant_softirqs_off;
 397atomic_t nr_unused_locks;
 398atomic_t nr_cyclic_checks;
 399atomic_t nr_cyclic_check_recursions;
 400atomic_t nr_find_usage_forwards_checks;
 401atomic_t nr_find_usage_forwards_recursions;
 402atomic_t nr_find_usage_backwards_checks;
 403atomic_t nr_find_usage_backwards_recursions;
 404# define debug_atomic_inc(ptr)          atomic_inc(ptr)
 405# define debug_atomic_dec(ptr)          atomic_dec(ptr)
 406# define debug_atomic_read(ptr)         atomic_read(ptr)
 407#else
 408# define debug_atomic_inc(ptr)          do { } while (0)
 409# define debug_atomic_dec(ptr)          do { } while (0)
 410# define debug_atomic_read(ptr)         0
 411#endif
 412
 413/*
 414 * Locking printouts:
 415 */
 416
 417static const char *usage_str[] =
 418{
 419        [LOCK_USED] =                   "initial-use ",
 420        [LOCK_USED_IN_HARDIRQ] =        "in-hardirq-W",
 421        [LOCK_USED_IN_SOFTIRQ] =        "in-softirq-W",
 422        [LOCK_ENABLED_SOFTIRQS] =       "softirq-on-W",
 423        [LOCK_ENABLED_HARDIRQS] =       "hardirq-on-W",
 424        [LOCK_USED_IN_HARDIRQ_READ] =   "in-hardirq-R",
 425        [LOCK_USED_IN_SOFTIRQ_READ] =   "in-softirq-R",
 426        [LOCK_ENABLED_SOFTIRQS_READ] =  "softirq-on-R",
 427        [LOCK_ENABLED_HARDIRQS_READ] =  "hardirq-on-R",
 428};
 429
 430const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
 431{
 432        return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
 433}
 434
 435void
 436get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
 437{
 438        *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
 439
 440        if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
 441                *c1 = '+';
 442        else
 443                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
 444                        *c1 = '-';
 445
 446        if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
 447                *c2 = '+';
 448        else
 449                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
 450                        *c2 = '-';
 451
 452        if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
 453                *c3 = '-';
 454        if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
 455                *c3 = '+';
 456                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
 457                        *c3 = '?';
 458        }
 459
 460        if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
 461                *c4 = '-';
 462        if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
 463                *c4 = '+';
 464                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
 465                        *c4 = '?';
 466        }
 467}
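/*
 * How to read the four characters filled in above when they show up in a
 * report such as "(&dev->lock){-+..}" (a made-up example): position 1 is
 * hardirq-write, 2 is softirq-write, 3 is hardirq-read, 4 is softirq-read.
 * '+' means the lock was used in that irq context, '-' means that irq type
 * was enabled while the lock was held, '?' (read positions only) means
 * both, and '.' means neither.
 */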
 468
 469static void print_lock_name(struct lock_class *class)
 470{
 471        char str[KSYM_NAME_LEN], c1, c2, c3, c4;
 472        const char *name;
 473
 474        get_usage_chars(class, &c1, &c2, &c3, &c4);
 475
 476        name = class->name;
 477        if (!name) {
 478                name = __get_key_name(class->key, str);
 479                printk(" (%s", name);
 480        } else {
 481                printk(" (%s", name);
 482                if (class->name_version > 1)
 483                        printk("#%d", class->name_version);
 484                if (class->subclass)
 485                        printk("/%d", class->subclass);
 486        }
 487        printk("){%c%c%c%c}", c1, c2, c3, c4);
 488}
 489
 490static void print_lockdep_cache(struct lockdep_map *lock)
 491{
 492        const char *name;
 493        char str[KSYM_NAME_LEN];
 494
 495        name = lock->name;
 496        if (!name)
 497                name = __get_key_name(lock->key->subkeys, str);
 498
 499        printk("%s", name);
 500}
 501
 502static void print_lock(struct held_lock *hlock)
 503{
 504        print_lock_name(hlock->class);
 505        printk(", at: ");
 506        print_ip_sym(hlock->acquire_ip);
 507}
 508
 509static void lockdep_print_held_locks(struct task_struct *curr)
 510{
 511        int i, depth = curr->lockdep_depth;
 512
 513        if (!depth) {
 514                printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
 515                return;
 516        }
 517        printk("%d lock%s held by %s/%d:\n",
 518                depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
 519
 520        for (i = 0; i < depth; i++) {
 521                printk(" #%d: ", i);
 522                print_lock(curr->held_locks + i);
 523        }
 524}
 525
 526static void print_lock_class_header(struct lock_class *class, int depth)
 527{
 528        int bit;
 529
 530        printk("%*s->", depth, "");
 531        print_lock_name(class);
 532        printk(" ops: %lu", class->ops);
 533        printk(" {\n");
 534
 535        for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
 536                if (class->usage_mask & (1 << bit)) {
 537                        int len = depth;
 538
 539                        len += printk("%*s   %s", depth, "", usage_str[bit]);
 540                        len += printk(" at:\n");
 541                        print_stack_trace(class->usage_traces + bit, len);
 542                }
 543        }
 544        printk("%*s }\n", depth, "");
 545
  546        printk("%*s ... key      at: ", depth, "");
 547        print_ip_sym((unsigned long)class->key);
 548}
 549
 550/*
 551 * printk all lock dependencies starting at <entry>:
 552 */
 553static void print_lock_dependencies(struct lock_class *class, int depth)
 554{
 555        struct lock_list *entry;
 556
 557        if (DEBUG_LOCKS_WARN_ON(depth >= 20))
 558                return;
 559
 560        print_lock_class_header(class, depth);
 561
 562        list_for_each_entry(entry, &class->locks_after, entry) {
 563                if (DEBUG_LOCKS_WARN_ON(!entry->class))
 564                        return;
 565
 566                print_lock_dependencies(entry->class, depth + 1);
 567
  568                printk("%*s ... acquired at:\n", depth, "");
 569                print_stack_trace(&entry->trace, 2);
 570                printk("\n");
 571        }
 572}
 573
 574static void print_kernel_version(void)
 575{
 576        printk("%s %.*s\n", init_utsname()->release,
 577                (int)strcspn(init_utsname()->version, " "),
 578                init_utsname()->version);
 579}
 580
 581static int very_verbose(struct lock_class *class)
 582{
 583#if VERY_VERBOSE
 584        return class_filter(class);
 585#endif
 586        return 0;
 587}
 588
 589/*
 590 * Is this the address of a static object:
 591 */
 592static int static_obj(void *obj)
 593{
 594        unsigned long start = (unsigned long) &_stext,
 595                      end   = (unsigned long) &_end,
 596                      addr  = (unsigned long) obj;
 597#ifdef CONFIG_SMP
 598        int i;
 599#endif
 600
 601        /*
 602         * static variable?
 603         */
 604        if ((addr >= start) && (addr < end))
 605                return 1;
 606
 607#ifdef CONFIG_SMP
 608        /*
 609         * percpu var?
 610         */
 611        for_each_possible_cpu(i) {
 612                start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
 613                end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
 614                                        + per_cpu_offset(i);
 615
 616                if ((addr >= start) && (addr < end))
 617                        return 1;
 618        }
 619#endif
 620
 621        /*
 622         * module var?
 623         */
 624        return is_module_address(addr);
 625}
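/*
 * Hedged illustration (hypothetical struct and names): a key living inside
 * kmalloc()'d memory is not a static object in the above sense, so
 * register_lock_class() below would reject it. One common annotation is a
 * file-scope lock_class_key shared by all objects of the same kind:
 */
#if 0
	static struct lock_class_key my_dev_lock_key;
	struct my_dev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);

	spin_lock_init(&dev->lock);
	lockdep_set_class(&dev->lock, &my_dev_lock_key);
#endif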
 626
 627/*
 628 * To make lock name printouts unique, we calculate a unique
 629 * class->name_version generation counter:
 630 */
 631static int count_matching_names(struct lock_class *new_class)
 632{
 633        struct lock_class *class;
 634        int count = 0;
 635
 636        if (!new_class->name)
 637                return 0;
 638
 639        list_for_each_entry(class, &all_lock_classes, lock_entry) {
 640                if (new_class->key - new_class->subclass == class->key)
 641                        return class->name_version;
 642                if (class->name && !strcmp(class->name, new_class->name))
 643                        count = max(count, class->name_version);
 644        }
 645
 646        return count + 1;
 647}
 648
 649/*
  650 * Look up a lock's class in the hash-table. Returns the class if it has
  651 * been registered already, NULL otherwise. (register_lock_class() caches
  652 * the result in the lock object, so the lookup happens once per object.)
 653 */
 654static inline struct lock_class *
 655look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 656{
 657        struct lockdep_subclass_key *key;
 658        struct list_head *hash_head;
 659        struct lock_class *class;
 660
 661#ifdef CONFIG_DEBUG_LOCKDEP
 662        /*
 663         * If the architecture calls into lockdep before initializing
 664         * the hashes then we'll warn about it later. (we cannot printk
 665         * right now)
 666         */
 667        if (unlikely(!lockdep_initialized)) {
 668                lockdep_init();
 669                lockdep_init_error = 1;
 670                save_stack_trace(&lockdep_init_trace);
 671        }
 672#endif
 673
 674        /*
 675         * Static locks do not have their class-keys yet - for them the key
 676         * is the lock object itself:
 677         */
 678        if (unlikely(!lock->key))
 679                lock->key = (void *)lock;
 680
 681        /*
 682         * NOTE: the class-key must be unique. For dynamic locks, a static
 683         * lock_class_key variable is passed in through the mutex_init()
 684         * (or spin_lock_init()) call - which acts as the key. For static
 685         * locks we use the lock object itself as the key.
 686         */
 687        BUILD_BUG_ON(sizeof(struct lock_class_key) >
 688                        sizeof(struct lockdep_map));
 689
 690        key = lock->key->subkeys + subclass;
 691
 692        hash_head = classhashentry(key);
 693
 694        /*
  695         * We can walk the hash lock-free, because the hash only
 696         * grows, and we are careful when adding entries to the end:
 697         */
 698        list_for_each_entry(class, hash_head, hash_entry) {
 699                if (class->key == key) {
 700                        WARN_ON_ONCE(class->name != lock->name);
 701                        return class;
 702                }
 703        }
 704
 705        return NULL;
 706}
 707
 708/*
 709 * Register a lock's class in the hash-table, if the class is not present
 710 * yet. Otherwise we look it up. We cache the result in the lock object
  711 * itself, so the actual hash lookup only has to happen once per lock object.
 712 */
 713static inline struct lock_class *
 714register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 715{
 716        struct lockdep_subclass_key *key;
 717        struct list_head *hash_head;
 718        struct lock_class *class;
 719        unsigned long flags;
 720
 721        class = look_up_lock_class(lock, subclass);
 722        if (likely(class))
 723                return class;
 724
 725        /*
 726         * Debug-check: all keys must be persistent!
 727         */
 728        if (!static_obj(lock->key)) {
 729                debug_locks_off();
 730                printk("INFO: trying to register non-static key.\n");
 731                printk("the code is fine but needs lockdep annotation.\n");
 732                printk("turning off the locking correctness validator.\n");
 733                dump_stack();
 734
 735                return NULL;
 736        }
 737
 738        key = lock->key->subkeys + subclass;
 739        hash_head = classhashentry(key);
 740
 741        raw_local_irq_save(flags);
 742        if (!graph_lock()) {
 743                raw_local_irq_restore(flags);
 744                return NULL;
 745        }
 746        /*
 747         * We have to do the hash-walk again, to avoid races
 748         * with another CPU:
 749         */
 750        list_for_each_entry(class, hash_head, hash_entry)
 751                if (class->key == key)
 752                        goto out_unlock_set;
 753        /*
 754         * Allocate a new key from the static array, and add it to
 755         * the hash:
 756         */
 757        if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
 758                if (!debug_locks_off_graph_unlock()) {
 759                        raw_local_irq_restore(flags);
 760                        return NULL;
 761                }
 762                raw_local_irq_restore(flags);
 763
 764                printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
 765                printk("turning off the locking correctness validator.\n");
 766                return NULL;
 767        }
 768        class = lock_classes + nr_lock_classes++;
 769        debug_atomic_inc(&nr_unused_locks);
 770        class->key = key;
 771        class->name = lock->name;
 772        class->subclass = subclass;
 773        INIT_LIST_HEAD(&class->lock_entry);
 774        INIT_LIST_HEAD(&class->locks_before);
 775        INIT_LIST_HEAD(&class->locks_after);
 776        class->name_version = count_matching_names(class);
 777        /*
 778         * We use RCU's safe list-add method to make
 779         * parallel walking of the hash-list safe:
 780         */
 781        list_add_tail_rcu(&class->hash_entry, hash_head);
 782
 783        if (verbose(class)) {
 784                graph_unlock();
 785                raw_local_irq_restore(flags);
 786
 787                printk("\nnew class %p: %s", class->key, class->name);
 788                if (class->name_version > 1)
 789                        printk("#%d", class->name_version);
 790                printk("\n");
 791                dump_stack();
 792
 793                raw_local_irq_save(flags);
 794                if (!graph_lock()) {
 795                        raw_local_irq_restore(flags);
 796                        return NULL;
 797                }
 798        }
 799out_unlock_set:
 800        graph_unlock();
 801        raw_local_irq_restore(flags);
 802
 803        if (!subclass || force)
 804                lock->class_cache = class;
 805
 806        if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
 807                return NULL;
 808
 809        return class;
 810}
 811
 812#ifdef CONFIG_PROVE_LOCKING
 813/*
  814 * Allocate a lockdep entry. (Assumes the graph_lock is held; returns
  815 * NULL on failure.)
 816 */
 817static struct lock_list *alloc_list_entry(void)
 818{
 819        if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
 820                if (!debug_locks_off_graph_unlock())
 821                        return NULL;
 822
 823                printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
 824                printk("turning off the locking correctness validator.\n");
 825                return NULL;
 826        }
 827        return list_entries + nr_list_entries++;
 828}
 829
 830/*
 831 * Add a new dependency to the head of the list:
 832 */
 833static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 834                            struct list_head *head, unsigned long ip, int distance)
 835{
 836        struct lock_list *entry;
 837        /*
 838         * Lock not present yet - get a new dependency struct and
 839         * add it to the list:
 840         */
 841        entry = alloc_list_entry();
 842        if (!entry)
 843                return 0;
 844
 845        entry->class = this;
 846        entry->distance = distance;
 847        if (!save_trace(&entry->trace))
 848                return 0;
 849
 850        /*
  851         * Since we never remove from the dependency list, the list can
  852         * be walked locklessly by other CPUs; only the allocation
  853         * must be protected by the spinlock. But this also means
  854         * we must make new entries visible only once the writes to the
  855         * entry have become visible - hence the RCU op:
 856         */
 857        list_add_tail_rcu(&entry->entry, head);
 858
 859        return 1;
 860}
 861
 862/*
 863 * Recursive, forwards-direction lock-dependency checking, used for
  864 * both non-circular checking and hardirq-unsafe/softirq-unsafe
 865 * checking.
 866 *
 867 * (to keep the stackframe of the recursive functions small we
 868 *  use these global variables, and we also mark various helper
 869 *  functions as noinline.)
 870 */
 871static struct held_lock *check_source, *check_target;
 872
 873/*
 874 * Print a dependency chain entry (this is only done when a deadlock
 875 * has been detected):
 876 */
 877static noinline int
 878print_circular_bug_entry(struct lock_list *target, unsigned int depth)
 879{
 880        if (debug_locks_silent)
 881                return 0;
 882        printk("\n-> #%u", depth);
 883        print_lock_name(target->class);
 884        printk(":\n");
 885        print_stack_trace(&target->trace, 6);
 886
 887        return 0;
 888}
 889
 890/*
 891 * When a circular dependency is detected, print the
 892 * header first:
 893 */
 894static noinline int
 895print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 896{
 897        struct task_struct *curr = current;
 898
 899        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 900                return 0;
 901
 902        printk("\n=======================================================\n");
 903        printk(  "[ INFO: possible circular locking dependency detected ]\n");
 904        print_kernel_version();
 905        printk(  "-------------------------------------------------------\n");
 906        printk("%s/%d is trying to acquire lock:\n",
 907                curr->comm, task_pid_nr(curr));
 908        print_lock(check_source);
 909        printk("\nbut task is already holding lock:\n");
 910        print_lock(check_target);
 911        printk("\nwhich lock already depends on the new lock.\n\n");
 912        printk("\nthe existing dependency chain (in reverse order) is:\n");
 913
 914        print_circular_bug_entry(entry, depth);
 915
 916        return 0;
 917}
 918
 919static noinline int print_circular_bug_tail(void)
 920{
 921        struct task_struct *curr = current;
 922        struct lock_list this;
 923
 924        if (debug_locks_silent)
 925                return 0;
 926
 927        this.class = check_source->class;
 928        if (!save_trace(&this.trace))
 929                return 0;
 930
 931        print_circular_bug_entry(&this, 0);
 932
 933        printk("\nother info that might help us debug this:\n\n");
 934        lockdep_print_held_locks(curr);
 935
 936        printk("\nstack backtrace:\n");
 937        dump_stack();
 938
 939        return 0;
 940}
 941
 942#define RECURSION_LIMIT 40
 943
 944static int noinline print_infinite_recursion_bug(void)
 945{
 946        if (!debug_locks_off_graph_unlock())
 947                return 0;
 948
 949        WARN_ON(1);
 950
 951        return 0;
 952}
 953
 954/*
  955 * Prove that the dependency graph starting at <source> cannot
  956 * lead to <target>. Print an error and return 0 if it does.
 957 */
 958static noinline int
 959check_noncircular(struct lock_class *source, unsigned int depth)
 960{
 961        struct lock_list *entry;
 962
 963        debug_atomic_inc(&nr_cyclic_check_recursions);
 964        if (depth > max_recursion_depth)
 965                max_recursion_depth = depth;
 966        if (depth >= RECURSION_LIMIT)
 967                return print_infinite_recursion_bug();
 968        /*
 969         * Check this lock's dependency list:
 970         */
 971        list_for_each_entry(entry, &source->locks_after, entry) {
 972                if (entry->class == check_target->class)
 973                        return print_circular_bug_header(entry, depth+1);
 974                debug_atomic_inc(&nr_cyclic_checks);
 975                if (!check_noncircular(entry->class, depth+1))
 976                        return print_circular_bug_entry(entry, depth+1);
 977        }
 978        return 1;
 979}
 980
 981#ifdef CONFIG_TRACE_IRQFLAGS
 982/*
 983 * Forwards and backwards subgraph searching, for the purposes of
 984 * proving that two subgraphs can be connected by a new dependency
 985 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 986 */
 987static enum lock_usage_bit find_usage_bit;
 988static struct lock_class *forwards_match, *backwards_match;
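/*
 * The pattern these searches catch, with made-up locks: lock_i is taken
 * from a hardirq handler (irq-safe), lock_u is taken with hardirqs enabled
 * (irq-unsafe). If some path then takes lock_u while holding lock_i, an
 * interrupt hitting lock_u's holder can spin on lock_i forever while
 * lock_i's holder waits for lock_u - a deadlock:
 */
#if 0
	/* process context, hardirqs enabled: lock_u becomes hardirq-unsafe */
	spin_lock(&lock_u);

	/* hardirq handler: lock_i becomes hardirq-safe */
	spin_lock(&lock_i);

	/* elsewhere: the new irq-safe -> irq-unsafe dependency, reported */
	spin_lock(&lock_i);
	spin_lock(&lock_u);
#endif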
 989
 990/*
 991 * Find a node in the forwards-direction dependency sub-graph starting
 992 * at <source> that matches <find_usage_bit>.
 993 *
 994 * Return 2 if such a node exists in the subgraph, and put that node
 995 * into <forwards_match>.
 996 *
 997 * Return 1 otherwise and keep <forwards_match> unchanged.
 998 * Return 0 on error.
 999 */
1000static noinline int
1001find_usage_forwards(struct lock_class *source, unsigned int depth)
1002{
1003        struct lock_list *entry;
1004        int ret;
1005
1006        if (depth > max_recursion_depth)
1007                max_recursion_depth = depth;
1008        if (depth >= RECURSION_LIMIT)
1009                return print_infinite_recursion_bug();
1010
1011        debug_atomic_inc(&nr_find_usage_forwards_checks);
1012        if (source->usage_mask & (1 << find_usage_bit)) {
1013                forwards_match = source;
1014                return 2;
1015        }
1016
1017        /*
1018         * Check this lock's dependency list:
1019         */
1020        list_for_each_entry(entry, &source->locks_after, entry) {
1021                debug_atomic_inc(&nr_find_usage_forwards_recursions);
1022                ret = find_usage_forwards(entry->class, depth+1);
1023                if (ret == 2 || ret == 0)
1024                        return ret;
1025        }
1026        return 1;
1027}
1028
1029/*
1030 * Find a node in the backwards-direction dependency sub-graph starting
1031 * at <source> that matches <find_usage_bit>.
1032 *
1033 * Return 2 if such a node exists in the subgraph, and put that node
1034 * into <backwards_match>.
1035 *
1036 * Return 1 otherwise and keep <backwards_match> unchanged.
1037 * Return 0 on error.
1038 */
1039static noinline int
1040find_usage_backwards(struct lock_class *source, unsigned int depth)
1041{
1042        struct lock_list *entry;
1043        int ret;
1044
1045        if (!__raw_spin_is_locked(&lockdep_lock))
1046                return DEBUG_LOCKS_WARN_ON(1);
1047
1048        if (depth > max_recursion_depth)
1049                max_recursion_depth = depth;
1050        if (depth >= RECURSION_LIMIT)
1051                return print_infinite_recursion_bug();
1052
1053        debug_atomic_inc(&nr_find_usage_backwards_checks);
1054        if (source->usage_mask & (1 << find_usage_bit)) {
1055                backwards_match = source;
1056                return 2;
1057        }
1058
1059        /*
1060         * Check this lock's dependency list:
1061         */
1062        list_for_each_entry(entry, &source->locks_before, entry) {
1063                debug_atomic_inc(&nr_find_usage_backwards_recursions);
1064                ret = find_usage_backwards(entry->class, depth+1);
1065                if (ret == 2 || ret == 0)
1066                        return ret;
1067        }
1068        return 1;
1069}
1070
1071static int
1072print_bad_irq_dependency(struct task_struct *curr,
1073                         struct held_lock *prev,
1074                         struct held_lock *next,
1075                         enum lock_usage_bit bit1,
1076                         enum lock_usage_bit bit2,
1077                         const char *irqclass)
1078{
1079        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1080                return 0;
1081
1082        printk("\n======================================================\n");
1083        printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1084                irqclass, irqclass);
1085        print_kernel_version();
1086        printk(  "------------------------------------------------------\n");
1087        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1088                curr->comm, task_pid_nr(curr),
1089                curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1090                curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1091                curr->hardirqs_enabled,
1092                curr->softirqs_enabled);
1093        print_lock(next);
1094
1095        printk("\nand this task is already holding:\n");
1096        print_lock(prev);
1097        printk("which would create a new lock dependency:\n");
1098        print_lock_name(prev->class);
1099        printk(" ->");
1100        print_lock_name(next->class);
1101        printk("\n");
1102
1103        printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1104                irqclass);
1105        print_lock_name(backwards_match);
1106        printk("\n... which became %s-irq-safe at:\n", irqclass);
1107
1108        print_stack_trace(backwards_match->usage_traces + bit1, 1);
1109
1110        printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1111        print_lock_name(forwards_match);
1112        printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1113        printk("...");
1114
1115        print_stack_trace(forwards_match->usage_traces + bit2, 1);
1116
1117        printk("\nother info that might help us debug this:\n\n");
1118        lockdep_print_held_locks(curr);
1119
1120        printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
1121        print_lock_dependencies(backwards_match, 0);
1122
1123        printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
1124        print_lock_dependencies(forwards_match, 0);
1125
1126        printk("\nstack backtrace:\n");
1127        dump_stack();
1128
1129        return 0;
1130}
1131
1132static int
1133check_usage(struct task_struct *curr, struct held_lock *prev,
1134            struct held_lock *next, enum lock_usage_bit bit_backwards,
1135            enum lock_usage_bit bit_forwards, const char *irqclass)
1136{
1137        int ret;
1138
1139        find_usage_bit = bit_backwards;
1140        /* fills in <backwards_match> */
1141        ret = find_usage_backwards(prev->class, 0);
1142        if (!ret || ret == 1)
1143                return ret;
1144
1145        find_usage_bit = bit_forwards;
1146        ret = find_usage_forwards(next->class, 0);
1147        if (!ret || ret == 1)
1148                return ret;
1149        /* ret == 2 */
1150        return print_bad_irq_dependency(curr, prev, next,
1151                        bit_backwards, bit_forwards, irqclass);
1152}
1153
1154static int
1155check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1156                struct held_lock *next)
1157{
1158        /*
1159         * Prove that the new dependency does not connect a hardirq-safe
1160         * lock with a hardirq-unsafe lock - to achieve this we search
1161         * the backwards-subgraph starting at <prev>, and the
1162         * forwards-subgraph starting at <next>:
1163         */
1164        if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
1165                                        LOCK_ENABLED_HARDIRQS, "hard"))
1166                return 0;
1167
1168        /*
1169         * Prove that the new dependency does not connect a hardirq-safe-read
1170         * lock with a hardirq-unsafe lock - to achieve this we search
1171         * the backwards-subgraph starting at <prev>, and the
1172         * forwards-subgraph starting at <next>:
1173         */
1174        if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
1175                                        LOCK_ENABLED_HARDIRQS, "hard-read"))
1176                return 0;
1177
1178        /*
1179         * Prove that the new dependency does not connect a softirq-safe
1180         * lock with a softirq-unsafe lock - to achieve this we search
1181         * the backwards-subgraph starting at <prev>, and the
1182         * forwards-subgraph starting at <next>:
1183         */
1184        if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
1185                                        LOCK_ENABLED_SOFTIRQS, "soft"))
1186                return 0;
1187        /*
1188         * Prove that the new dependency does not connect a softirq-safe-read
1189         * lock with a softirq-unsafe lock - to achieve this we search
1190         * the backwards-subgraph starting at <prev>, and the
1191         * forwards-subgraph starting at <next>:
1192         */
1193        if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
 1194                                        LOCK_ENABLED_SOFTIRQS, "soft-read"))
1195                return 0;
1196
1197        return 1;
1198}
1199
1200static void inc_chains(void)
1201{
1202        if (current->hardirq_context)
1203                nr_hardirq_chains++;
1204        else {
1205                if (current->softirq_context)
1206                        nr_softirq_chains++;
1207                else
1208                        nr_process_chains++;
1209        }
1210}
1211
1212#else
1213
1214static inline int
1215check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1216                struct held_lock *next)
1217{
1218        return 1;
1219}
1220
1221static inline void inc_chains(void)
1222{
1223        nr_process_chains++;
1224}
1225
1226#endif
1227
1228static int
1229print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1230                   struct held_lock *next)
1231{
1232        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1233                return 0;
1234
1235        printk("\n=============================================\n");
1236        printk(  "[ INFO: possible recursive locking detected ]\n");
1237        print_kernel_version();
1238        printk(  "---------------------------------------------\n");
1239        printk("%s/%d is trying to acquire lock:\n",
1240                curr->comm, task_pid_nr(curr));
1241        print_lock(next);
1242        printk("\nbut task is already holding lock:\n");
1243        print_lock(prev);
1244
1245        printk("\nother info that might help us debug this:\n");
1246        lockdep_print_held_locks(curr);
1247
1248        printk("\nstack backtrace:\n");
1249        dump_stack();
1250
1251        return 0;
1252}
1253
1254/*
1255 * Check whether we are holding such a class already.
1256 *
1257 * (Note that this has to be done separately, because the graph cannot
1258 * detect such classes of deadlocks.)
1259 *
1260 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1261 */
1262static int
1263check_deadlock(struct task_struct *curr, struct held_lock *next,
1264               struct lockdep_map *next_instance, int read)
1265{
1266        struct held_lock *prev;
1267        int i;
1268
1269        for (i = 0; i < curr->lockdep_depth; i++) {
1270                prev = curr->held_locks + i;
1271                if (prev->class != next->class)
1272                        continue;
1273                /*
1274                 * Allow read-after-read recursion of the same
1275                 * lock class (i.e. read_lock(lock)+read_lock(lock)):
1276                 */
1277                if ((read == 2) && prev->read)
1278                        return 2;
1279                return print_deadlock_bug(curr, prev, next);
1280        }
1281        return 1;
1282}
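/*
 * Hedged illustration of the rule above, with made-up locks: rwlock_t
 * readers may recurse on the same class, anything else that re-acquires
 * a class already held by this task is flagged:
 */
#if 0
	read_lock(&my_rwlock);
	read_lock(&my_rwlock);	/* ok: recursive read, check_deadlock() returns 2 */
	read_unlock(&my_rwlock);
	read_unlock(&my_rwlock);

	spin_lock(&my_lock);
	spin_lock(&my_lock);	/* same class held twice: print_deadlock_bug() */
#endif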
1283
1284/*
1285 * There was a chain-cache miss, and we are about to add a new dependency
1286 * to a previous lock. We recursively validate the following rules:
1287 *
1288 *  - would the adding of the <prev> -> <next> dependency create a
1289 *    circular dependency in the graph? [== circular deadlock]
1290 *
1291 *  - does the new prev->next dependency connect any hardirq-safe lock
1292 *    (in the full backwards-subgraph starting at <prev>) with any
1293 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1294 *    <next>)? [== illegal lock inversion with hardirq contexts]
1295 *
1296 *  - does the new prev->next dependency connect any softirq-safe lock
1297 *    (in the full backwards-subgraph starting at <prev>) with any
1298 *    softirq-unsafe lock (in the full forwards-subgraph starting at
1299 *    <next>)? [== illegal lock inversion with softirq contexts]
1300 *
1301 * any of these scenarios could lead to a deadlock.
1302 *
1303 * Then if all the validations pass, we add the forwards and backwards
1304 * dependency.
1305 */
1306static int
1307check_prev_add(struct task_struct *curr, struct held_lock *prev,
1308               struct held_lock *next, int distance)
1309{
1310        struct lock_list *entry;
1311        int ret;
1312
1313        /*
1314         * Prove that the new <prev> -> <next> dependency would not
1315         * create a circular dependency in the graph. (We do this by
1316         * forward-recursing into the graph starting at <next>, and
1317         * checking whether we can reach <prev>.)
1318         *
1319         * We are using global variables to control the recursion, to
1320         * keep the stackframe size of the recursive functions low:
1321         */
1322        check_source = next;
1323        check_target = prev;
1324        if (!(check_noncircular(next->class, 0)))
1325                return print_circular_bug_tail();
1326
1327        if (!check_prev_add_irq(curr, prev, next))
1328                return 0;
1329
1330        /*
1331         * For recursive read-locks we do all the dependency checks,
1332         * but we dont store read-triggered dependencies (only
1333         * write-triggered dependencies). This ensures that only the
1334         * write-side dependencies matter, and that if for example a
1335         * write-lock never takes any other locks, then the reads are
1336         * equivalent to a NOP.
1337         */
1338        if (next->read == 2 || prev->read == 2)
1339                return 1;
1340        /*
1341         * Is the <prev> -> <next> dependency already present?
1342         *
1343         * (this may occur even though this is a new chain: consider
1344         *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1345         *  chains - the second one will be new, but L1 already has
1346         *  L2 added to its dependency list, due to the first chain.)
1347         */
1348        list_for_each_entry(entry, &prev->class->locks_after, entry) {
1349                if (entry->class == next->class) {
1350                        if (distance == 1)
1351                                entry->distance = 1;
1352                        return 2;
1353                }
1354        }
1355
1356        /*
1357         * Ok, all validations passed, add the new lock
1358         * to the previous lock's dependency list:
1359         */
1360        ret = add_lock_to_list(prev->class, next->class,
1361                               &prev->class->locks_after, next->acquire_ip, distance);
1362
1363        if (!ret)
1364                return 0;
1365
1366        ret = add_lock_to_list(next->class, prev->class,
1367                               &next->class->locks_before, next->acquire_ip, distance);
1368        if (!ret)
1369                return 0;
1370
1371        /*
1372         * Debugging printouts:
1373         */
1374        if (verbose(prev->class) || verbose(next->class)) {
1375                graph_unlock();
1376                printk("\n new dependency: ");
1377                print_lock_name(prev->class);
1378                printk(" => ");
1379                print_lock_name(next->class);
1380                printk("\n");
1381                dump_stack();
1382                return graph_lock();
1383        }
1384        return 1;
1385}
1386
1387/*
1388 * Add the dependency to all directly-previous locks that are 'relevant'.
1389 * The ones that are relevant are (in increasing distance from curr):
1390 * all consecutive trylock entries and the final non-trylock entry - or
1391 * the end of this context's lock-chain - whichever comes first.
1392 */
1393static int
1394check_prevs_add(struct task_struct *curr, struct held_lock *next)
1395{
1396        int depth = curr->lockdep_depth;
1397        struct held_lock *hlock;
1398
1399        /*
1400         * Debugging checks.
1401         *
1402         * Depth must not be zero for a non-head lock:
1403         */
1404        if (!depth)
1405                goto out_bug;
1406        /*
1407         * At least two relevant locks must exist for this
1408         * to be a head:
1409         */
1410        if (curr->held_locks[depth].irq_context !=
1411                        curr->held_locks[depth-1].irq_context)
1412                goto out_bug;
1413
1414        for (;;) {
1415                int distance = curr->lockdep_depth - depth + 1;
1416                hlock = curr->held_locks + depth-1;
1417                /*
1418                 * Only non-recursive-read entries get new dependencies
1419                 * added:
1420                 */
1421                if (hlock->read != 2) {
1422                        if (!check_prev_add(curr, hlock, next, distance))
1423                                return 0;
1424                        /*
1425                         * Stop after the first non-trylock entry,
1426                         * as non-trylock entries have added their
1427                         * own direct dependencies already, so this
1428                         * lock is connected to them indirectly:
1429                         */
1430                        if (!hlock->trylock)
1431                                break;
1432                }
1433                depth--;
1434                /*
1435                 * End of lock-stack?
1436                 */
1437                if (!depth)
1438                        break;
1439                /*
1440                 * Stop the search if we cross into another context:
1441                 */
1442                if (curr->held_locks[depth].irq_context !=
1443                                curr->held_locks[depth-1].irq_context)
1444                        break;
1445        }
1446        return 1;
1447out_bug:
1448        if (!debug_locks_off_graph_unlock())
1449                return 0;
1450
1451        WARN_ON(1);
1452
1453        return 0;
1454}
1455
1456unsigned long nr_lock_chains;
1457static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1458
1459/*
1460 * Look up a dependency chain. If the key is not present yet then
1461 * add it and return 1 - in this case the new dependency chain is
1462 * validated. If the key is already hashed, return 0.
1463 * (On return with 1 graph_lock is held.)
1464 */
1465static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
1466{
1467        struct list_head *hash_head = chainhashentry(chain_key);
1468        struct lock_chain *chain;
1469
1470        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1471                return 0;
1472        /*
1473         * We can walk it lock-free, because entries only get added
1474         * to the hash:
1475         */
1476        list_for_each_entry(chain, hash_head, entry) {
1477                if (chain->chain_key == chain_key) {
1478cache_hit:
1479                        debug_atomic_inc(&chain_lookup_hits);
1480                        if (very_verbose(class))
1481                                printk("\nhash chain already cached, key: "
1482                                        "%016Lx tail class: [%p] %s\n",
1483                                        (unsigned long long)chain_key,
1484                                        class->key, class->name);
1485                        return 0;
1486                }
1487        }
1488        if (very_verbose(class))
1489                printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1490                        (unsigned long long)chain_key, class->key, class->name);
1491        /*
1492         * Allocate a new chain entry from the static array, and add
1493         * it to the hash:
1494         */
1495        if (!graph_lock())
1496                return 0;
1497        /*
1498         * We have to walk the chain again locked - to avoid duplicates:
1499         */
1500        list_for_each_entry(chain, hash_head, entry) {
1501                if (chain->chain_key == chain_key) {
1502                        graph_unlock();
1503                        goto cache_hit;
1504                }
1505        }
1506        if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1507                if (!debug_locks_off_graph_unlock())
1508                        return 0;
1509
1510                printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1511                printk("turning off the locking correctness validator.\n");
1512                return 0;
1513        }
1514        chain = lock_chains + nr_lock_chains++;
1515        chain->chain_key = chain_key;
1516        list_add_tail_rcu(&chain->entry, hash_head);
1517        debug_atomic_inc(&chain_lookup_misses);
1518        inc_chains();
1519
1520        return 1;
1521}
1522
1523static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
1524                struct held_lock *hlock, int chain_head, u64 chain_key)
1525{
1526        /*
1527         * Trylock needs to maintain the stack of held locks, but it
1528         * does not add new dependencies, because trylock can be done
1529         * in any order.
1530         *
1531         * We look up the chain_key and do the O(N^2) check and update of
1532         * the dependencies only if this is a new dependency chain.
1533         * (If lookup_chain_cache() returns with 1 it acquires
1534         * graph_lock for us)
1535         */
1536        if (!hlock->trylock && (hlock->check == 2) &&
1537                        lookup_chain_cache(chain_key, hlock->class)) {
1538                /*
1539                 * Check whether last held lock:
1540                 *
1541                 * - is irq-safe, if this lock is irq-unsafe
1542                 * - is softirq-safe, if this lock is hardirq-unsafe
1543                 *
1544                 * And check whether the new lock's dependency graph
1545                 * could lead back to the previous lock.
1546                 *
 1547                 * Any of these scenarios could lead to a deadlock. If
 1548                 * all validations pass, we add the new dependencies.
1549                 */
1550                int ret = check_deadlock(curr, hlock, lock, hlock->read);
1551
1552                if (!ret)
1553                        return 0;
1554                /*
1555                 * Mark recursive read, as we jump over it when
1556                 * building dependencies (just like we jump over
1557                 * trylock entries):
1558                 */
1559                if (ret == 2)
1560                        hlock->read = 2;
1561                /*
1562                 * Add dependency only if this lock is not the head
1563                 * of the chain, and if it's not a secondary read-lock:
1564                 */
1565                if (!chain_head && ret != 2)
1566                        if (!check_prevs_add(curr, hlock))
1567                                return 0;
1568                graph_unlock();
1569        } else
1570                /* after lookup_chain_cache(): */
1571                if (unlikely(!debug_locks))
1572                        return 0;
1573
1574        return 1;
1575}
1576#else
1577static inline int validate_chain(struct task_struct *curr,
1578                struct lockdep_map *lock, struct held_lock *hlock,
1579                int chain_head, u64 chain_key)
1580{
1581        return 1;
1582}
1583#endif
1584
1585/*
1586 * We are building curr_chain_key incrementally, so double-check
1587 * it from scratch, to make sure that it's done correctly:
1588 */
1589static void check_chain_key(struct task_struct *curr)
1590{
1591#ifdef CONFIG_DEBUG_LOCKDEP
1592        struct held_lock *hlock, *prev_hlock = NULL;
1593        unsigned int i, id;
1594        u64 chain_key = 0;
1595
1596        for (i = 0; i < curr->lockdep_depth; i++) {
1597                hlock = curr->held_locks + i;
1598                if (chain_key != hlock->prev_chain_key) {
1599                        debug_locks_off();
1600                        printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1601                                curr->lockdep_depth, i,
1602                                (unsigned long long)chain_key,
1603                                (unsigned long long)hlock->prev_chain_key);
1604                        WARN_ON(1);
1605                        return;
1606                }
1607                id = hlock->class - lock_classes;
1608                if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1609                        return;
1610
1611                if (prev_hlock && (prev_hlock->irq_context !=
1612                                                        hlock->irq_context))
1613                        chain_key = 0;
1614                chain_key = iterate_chain_key(chain_key, id);
1615                prev_hlock = hlock;
1616        }
1617        if (chain_key != curr->curr_chain_key) {
1618                debug_locks_off();
1619                printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1620                        curr->lockdep_depth, i,
1621                        (unsigned long long)chain_key,
1622                        (unsigned long long)curr->curr_chain_key);
1623                WARN_ON(1);
1624        }
1625#endif
1626}
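
/*
 * Illustrative sketch (hypothetical helper, mirroring the loop in
 * check_chain_key() above): the chain key of a held-lock stack is just
 * the class ids folded together in order with iterate_chain_key(), so
 * the incrementally maintained curr_chain_key can always be recomputed
 * from scratch and compared against the cached value.
 */
#if 0
static u64 example_chain_key(const unsigned int *class_ids, int nr_held)
{
        u64 key = 0;
        int i;

        for (i = 0; i < nr_held; i++)
                key = iterate_chain_key(key, class_ids[i]);

        return key;
}
#endif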
1627
1628static int
1629print_usage_bug(struct task_struct *curr, struct held_lock *this,
1630                enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1631{
1632        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1633                return 0;
1634
1635        printk("\n=================================\n");
1636        printk(  "[ INFO: inconsistent lock state ]\n");
1637        print_kernel_version();
1638        printk(  "---------------------------------\n");
1639
1640        printk("inconsistent {%s} -> {%s} usage.\n",
1641                usage_str[prev_bit], usage_str[new_bit]);
1642
1643        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1644                curr->comm, task_pid_nr(curr),
1645                trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1646                trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1647                trace_hardirqs_enabled(curr),
1648                trace_softirqs_enabled(curr));
1649        print_lock(this);
1650
1651        printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1652        print_stack_trace(this->class->usage_traces + prev_bit, 1);
1653
1654        print_irqtrace_events(curr);
1655        printk("\nother info that might help us debug this:\n");
1656        lockdep_print_held_locks(curr);
1657
1658        printk("\nstack backtrace:\n");
1659        dump_stack();
1660
1661        return 0;
1662}
1663
1664/*
1665 * Print out an error if an invalid bit is set:
1666 */
1667static inline int
1668valid_state(struct task_struct *curr, struct held_lock *this,
1669            enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1670{
1671        if (unlikely(this->class->usage_mask & (1 << bad_bit)))
1672                return print_usage_bug(curr, this, bad_bit, new_bit);
1673        return 1;
1674}
1675
1676static int mark_lock(struct task_struct *curr, struct held_lock *this,
1677                     enum lock_usage_bit new_bit);
1678
1679#ifdef CONFIG_TRACE_IRQFLAGS
1680
1681/*
1682 * print irq inversion bug:
1683 */
1684static int
1685print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1686                        struct held_lock *this, int forwards,
1687                        const char *irqclass)
1688{
1689        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1690                return 0;
1691
1692        printk("\n=========================================================\n");
1693        printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
1694        print_kernel_version();
1695        printk(  "---------------------------------------------------------\n");
1696        printk("%s/%d just changed the state of lock:\n",
1697                curr->comm, task_pid_nr(curr));
1698        print_lock(this);
1699        if (forwards)
1700                printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
1701        else
1702                printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
1703        print_lock_name(other);
1704        printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1705
1706        printk("\nother info that might help us debug this:\n");
1707        lockdep_print_held_locks(curr);
1708
1709        printk("\nthe first lock's dependencies:\n");
1710        print_lock_dependencies(this->class, 0);
1711
1712        printk("\nthe second lock's dependencies:\n");
1713        print_lock_dependencies(other, 0);
1714
1715        printk("\nstack backtrace:\n");
1716        dump_stack();
1717
1718        return 0;
1719}
1720
1721/*
1722 * Prove that in the forwards-direction subgraph starting at <this>
1723 * there is no lock whose usage mask contains <bit>:
1724 */
1725static int
1726check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1727                     enum lock_usage_bit bit, const char *irqclass)
1728{
1729        int ret;
1730
1731        find_usage_bit = bit;
1732        /* fills in <forwards_match> */
1733        ret = find_usage_forwards(this->class, 0);
1734        if (!ret || ret == 1)
1735                return ret;
1736
1737        return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
1738}
1739
1740/*
1741 * Prove that in the backwards-direction subgraph starting at <this>
1742 * there is no lock whose usage mask contains <bit>:
1743 */
1744static int
1745check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1746                      enum lock_usage_bit bit, const char *irqclass)
1747{
1748        int ret;
1749
1750        find_usage_bit = bit;
1751        /* fills in <backwards_match> */
1752        ret = find_usage_backwards(this->class, 0);
1753        if (!ret || ret == 1)
1754                return ret;
1755
1756        return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1757}
1758
1759void print_irqtrace_events(struct task_struct *curr)
1760{
1761        printk("irq event stamp: %u\n", curr->irq_events);
1762        printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
1763        print_ip_sym(curr->hardirq_enable_ip);
1764        printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
1765        print_ip_sym(curr->hardirq_disable_ip);
1766        printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
1767        print_ip_sym(curr->softirq_enable_ip);
1768        printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
1769        print_ip_sym(curr->softirq_disable_ip);
1770}
1771
1772static int hardirq_verbose(struct lock_class *class)
1773{
1774#if HARDIRQ_VERBOSE
1775        return class_filter(class);
1776#endif
1777        return 0;
1778}
1779
1780static int softirq_verbose(struct lock_class *class)
1781{
1782#if SOFTIRQ_VERBOSE
1783        return class_filter(class);
1784#endif
1785        return 0;
1786}
1787
1788#define STRICT_READ_CHECKS      1
1789
1790static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1791                enum lock_usage_bit new_bit)
1792{
1793        int ret = 1;
1794
1795        switch(new_bit) {
1796        case LOCK_USED_IN_HARDIRQ:
1797                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1798                        return 0;
1799                if (!valid_state(curr, this, new_bit,
1800                                 LOCK_ENABLED_HARDIRQS_READ))
1801                        return 0;
1802                /*
1803                 * just marked it hardirq-safe, check that this lock
1804                 * took no hardirq-unsafe lock in the past:
1805                 */
1806                if (!check_usage_forwards(curr, this,
1807                                          LOCK_ENABLED_HARDIRQS, "hard"))
1808                        return 0;
1809#if STRICT_READ_CHECKS
1810                /*
1811                 * just marked it hardirq-safe, check that this lock
1812                 * took no hardirq-unsafe-read lock in the past:
1813                 */
1814                if (!check_usage_forwards(curr, this,
1815                                LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
1816                        return 0;
1817#endif
1818                if (hardirq_verbose(this->class))
1819                        ret = 2;
1820                break;
1821        case LOCK_USED_IN_SOFTIRQ:
1822                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1823                        return 0;
1824                if (!valid_state(curr, this, new_bit,
1825                                 LOCK_ENABLED_SOFTIRQS_READ))
1826                        return 0;
1827                /*
1828                 * just marked it softirq-safe, check that this lock
1829                 * took no softirq-unsafe lock in the past:
1830                 */
1831                if (!check_usage_forwards(curr, this,
1832                                          LOCK_ENABLED_SOFTIRQS, "soft"))
1833                        return 0;
1834#if STRICT_READ_CHECKS
1835                /*
1836                 * just marked it softirq-safe, check that this lock
1837                 * took no softirq-unsafe-read lock in the past:
1838                 */
1839                if (!check_usage_forwards(curr, this,
1840                                LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
1841                        return 0;
1842#endif
1843                if (softirq_verbose(this->class))
1844                        ret = 2;
1845                break;
1846        case LOCK_USED_IN_HARDIRQ_READ:
1847                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1848                        return 0;
1849                /*
1850                 * just marked it hardirq-read-safe, check that this lock
1851                 * took no hardirq-unsafe lock in the past:
1852                 */
1853                if (!check_usage_forwards(curr, this,
1854                                          LOCK_ENABLED_HARDIRQS, "hard"))
1855                        return 0;
1856                if (hardirq_verbose(this->class))
1857                        ret = 2;
1858                break;
1859        case LOCK_USED_IN_SOFTIRQ_READ:
1860                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1861                        return 0;
1862                /*
1863                 * just marked it softirq-read-safe, check that this lock
1864                 * took no softirq-unsafe lock in the past:
1865                 */
1866                if (!check_usage_forwards(curr, this,
1867                                          LOCK_ENABLED_SOFTIRQS, "soft"))
1868                        return 0;
1869                if (softirq_verbose(this->class))
1870                        ret = 2;
1871                break;
1872        case LOCK_ENABLED_HARDIRQS:
1873                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
1874                        return 0;
1875                if (!valid_state(curr, this, new_bit,
1876                                 LOCK_USED_IN_HARDIRQ_READ))
1877                        return 0;
1878                /*
1879                 * just marked it hardirq-unsafe, check that no hardirq-safe
1880                 * lock in the system ever took it in the past:
1881                 */
1882                if (!check_usage_backwards(curr, this,
1883                                           LOCK_USED_IN_HARDIRQ, "hard"))
1884                        return 0;
1885#if STRICT_READ_CHECKS
1886                /*
1887                 * just marked it hardirq-unsafe, check that no
1888                 * hardirq-safe-read lock in the system ever took
1889                 * it in the past:
1890                 */
1891                if (!check_usage_backwards(curr, this,
1892                                   LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
1893                        return 0;
1894#endif
1895                if (hardirq_verbose(this->class))
1896                        ret = 2;
1897                break;
1898        case LOCK_ENABLED_SOFTIRQS:
1899                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
1900                        return 0;
1901                if (!valid_state(curr, this, new_bit,
1902                                 LOCK_USED_IN_SOFTIRQ_READ))
1903                        return 0;
1904                /*
1905                 * just marked it softirq-unsafe, check that no softirq-safe
1906                 * lock in the system ever took it in the past:
1907                 */
1908                if (!check_usage_backwards(curr, this,
1909                                           LOCK_USED_IN_SOFTIRQ, "soft"))
1910                        return 0;
1911#if STRICT_READ_CHECKS
1912                /*
1913                 * just marked it softirq-unsafe, check that no
1914                 * softirq-safe-read lock in the system ever took
1915                 * it in the past:
1916                 */
1917                if (!check_usage_backwards(curr, this,
1918                                   LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
1919                        return 0;
1920#endif
1921                if (softirq_verbose(this->class))
1922                        ret = 2;
1923                break;
1924        case LOCK_ENABLED_HARDIRQS_READ:
1925                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
1926                        return 0;
1927#if STRICT_READ_CHECKS
1928                /*
1929                 * just marked it hardirq-read-unsafe, check that no
1930                 * hardirq-safe lock in the system ever took it in the past:
1931                 */
1932                if (!check_usage_backwards(curr, this,
1933                                           LOCK_USED_IN_HARDIRQ, "hard"))
1934                        return 0;
1935#endif
1936                if (hardirq_verbose(this->class))
1937                        ret = 2;
1938                break;
1939        case LOCK_ENABLED_SOFTIRQS_READ:
1940                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
1941                        return 0;
1942#if STRICT_READ_CHECKS
1943                /*
1944                 * just marked it softirq-read-unsafe, check that no
1945                 * softirq-safe lock in the system ever took it in the past:
1946                 */
1947                if (!check_usage_backwards(curr, this,
1948                                           LOCK_USED_IN_SOFTIRQ, "soft"))
1949                        return 0;
1950#endif
1951                if (softirq_verbose(this->class))
1952                        ret = 2;
1953                break;
1954        default:
1955                WARN_ON(1);
1956                break;
1957        }
1958
1959        return ret;
1960}
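
/*
 * Illustrative sketch (hypothetical names, not part of the validator):
 * the classic scenario that the LOCK_USED_IN_HARDIRQ vs
 * LOCK_ENABLED_HARDIRQS checks above catch.  The same lock is taken
 * from a hardirq handler and, with hardirqs enabled, from process
 * context - an interrupt arriving while the lock is held in process
 * context would deadlock.
 */
#if 0
static DEFINE_SPINLOCK(example_irq_lock);

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
        spin_lock(&example_irq_lock);           /* LOCK_USED_IN_HARDIRQ */
        /* ... */
        spin_unlock(&example_irq_lock);
        return IRQ_HANDLED;
}

static void example_process_context(void)
{
        /*
         * Taking the lock with hardirqs enabled marks it
         * LOCK_ENABLED_HARDIRQS; mark_lock_irq() then reports the
         * inconsistency.  spin_lock_irqsave() would be the fix here.
         */
        spin_lock(&example_irq_lock);
        /* ... */
        spin_unlock(&example_irq_lock);
}
#endif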
1961
1962/*
1963 * Mark all held locks with a usage bit:
1964 */
1965static int
1966mark_held_locks(struct task_struct *curr, int hardirq)
1967{
1968        enum lock_usage_bit usage_bit;
1969        struct held_lock *hlock;
1970        int i;
1971
1972        for (i = 0; i < curr->lockdep_depth; i++) {
1973                hlock = curr->held_locks + i;
1974
1975                if (hardirq) {
1976                        if (hlock->read)
1977                                usage_bit = LOCK_ENABLED_HARDIRQS_READ;
1978                        else
1979                                usage_bit = LOCK_ENABLED_HARDIRQS;
1980                } else {
1981                        if (hlock->read)
1982                                usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
1983                        else
1984                                usage_bit = LOCK_ENABLED_SOFTIRQS;
1985                }
1986                if (!mark_lock(curr, hlock, usage_bit))
1987                        return 0;
1988        }
1989
1990        return 1;
1991}
1992
1993/*
1994 * Debugging helper: via this flag we know that we are in
1995 * 'early bootup code', and will warn about any invalid irqs-on event:
1996 */
1997static int early_boot_irqs_enabled;
1998
1999void early_boot_irqs_off(void)
2000{
2001        early_boot_irqs_enabled = 0;
2002}
2003
2004void early_boot_irqs_on(void)
2005{
2006        early_boot_irqs_enabled = 1;
2007}
2008
2009/*
2010 * Hardirqs will be enabled:
2011 */
2012void trace_hardirqs_on(void)
2013{
2014        struct task_struct *curr = current;
2015        unsigned long ip;
2016
2017        if (unlikely(!debug_locks || current->lockdep_recursion))
2018                return;
2019
2020        if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
2021                return;
2022
2023        if (unlikely(curr->hardirqs_enabled)) {
2024                debug_atomic_inc(&redundant_hardirqs_on);
2025                return;
2026        }
2027        /* we'll do an OFF -> ON transition: */
2028        curr->hardirqs_enabled = 1;
2029        ip = (unsigned long) __builtin_return_address(0);
2030
2031        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2032                return;
2033        if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2034                return;
2035        /*
2036         * We are going to turn hardirqs on, so set the
2037         * usage bit for all held locks:
2038         */
2039        if (!mark_held_locks(curr, 1))
2040                return;
2041        /*
2042         * If we have softirqs enabled, then set the usage
2043         * bit for all held locks. (disabled hardirqs prevented
2044         * this bit from being set before)
2045         */
2046        if (curr->softirqs_enabled)
2047                if (!mark_held_locks(curr, 0))
2048                        return;
2049
2050        curr->hardirq_enable_ip = ip;
2051        curr->hardirq_enable_event = ++curr->irq_events;
2052        debug_atomic_inc(&hardirqs_on_events);
2053}
2054
2055EXPORT_SYMBOL(trace_hardirqs_on);
2056
2057/*
2058 * Hardirqs were disabled:
2059 */
2060void trace_hardirqs_off(void)
2061{
2062        struct task_struct *curr = current;
2063
2064        if (unlikely(!debug_locks || current->lockdep_recursion))
2065                return;
2066
2067        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2068                return;
2069
2070        if (curr->hardirqs_enabled) {
2071                /*
2072                 * We have done an ON -> OFF transition:
2073                 */
2074                curr->hardirqs_enabled = 0;
2075                curr->hardirq_disable_ip = _RET_IP_;
2076                curr->hardirq_disable_event = ++curr->irq_events;
2077                debug_atomic_inc(&hardirqs_off_events);
2078        } else
2079                debug_atomic_inc(&redundant_hardirqs_off);
2080}
2081
2082EXPORT_SYMBOL(trace_hardirqs_off);
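
/*
 * Illustrative sketch (simplified): the hooks above are driven from the
 * irq-flags wrappers, roughly like this, so that lockdep observes every
 * hardirqs on/off transition made by C code.
 */
#if 0
#define example_local_irq_enable()              \
        do {                                    \
                trace_hardirqs_on();            \
                raw_local_irq_enable();         \
        } while (0)

#define example_local_irq_disable()             \
        do {                                    \
                raw_local_irq_disable();        \
                trace_hardirqs_off();           \
        } while (0)
#endif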
2083
2084/*
2085 * Softirqs will be enabled:
2086 */
2087void trace_softirqs_on(unsigned long ip)
2088{
2089        struct task_struct *curr = current;
2090
2091        if (unlikely(!debug_locks))
2092                return;
2093
2094        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2095                return;
2096
2097        if (curr->softirqs_enabled) {
2098                debug_atomic_inc(&redundant_softirqs_on);
2099                return;
2100        }
2101
2102        /*
2103         * We'll do an OFF -> ON transition:
2104         */
2105        curr->softirqs_enabled = 1;
2106        curr->softirq_enable_ip = ip;
2107        curr->softirq_enable_event = ++curr->irq_events;
2108        debug_atomic_inc(&softirqs_on_events);
2109        /*
2110         * We are going to turn softirqs on, so set the
2111         * usage bit for all held locks, if hardirqs are
2112         * enabled too:
2113         */
2114        if (curr->hardirqs_enabled)
2115                mark_held_locks(curr, 0);
2116}
2117
2118/*
2119 * Softirqs were disabled:
2120 */
2121void trace_softirqs_off(unsigned long ip)
2122{
2123        struct task_struct *curr = current;
2124
2125        if (unlikely(!debug_locks))
2126                return;
2127
2128        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2129                return;
2130
2131        if (curr->softirqs_enabled) {
2132                /*
2133                 * We have done an ON -> OFF transition:
2134                 */
2135                curr->softirqs_enabled = 0;
2136                curr->softirq_disable_ip = ip;
2137                curr->softirq_disable_event = ++curr->irq_events;
2138                debug_atomic_inc(&softirqs_off_events);
2139                DEBUG_LOCKS_WARN_ON(!softirq_count());
2140        } else
2141                debug_atomic_inc(&redundant_softirqs_off);
2142}
2143
2144static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2145{
2146        /*
2147         * If this is a non-trylock acquisition in a hardirq or softirq
2148         * context, mark the lock as used in that context:
2149         */
2150        if (!hlock->trylock) {
2151                if (hlock->read) {
2152                        if (curr->hardirq_context)
2153                                if (!mark_lock(curr, hlock,
2154                                                LOCK_USED_IN_HARDIRQ_READ))
2155                                        return 0;
2156                        if (curr->softirq_context)
2157                                if (!mark_lock(curr, hlock,
2158                                                LOCK_USED_IN_SOFTIRQ_READ))
2159                                        return 0;
2160                } else {
2161                        if (curr->hardirq_context)
2162                                if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2163                                        return 0;
2164                        if (curr->softirq_context)
2165                                if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2166                                        return 0;
2167                }
2168        }
2169        if (!hlock->hardirqs_off) {
2170                if (hlock->read) {
2171                        if (!mark_lock(curr, hlock,
2172                                        LOCK_ENABLED_HARDIRQS_READ))
2173                                return 0;
2174                        if (curr->softirqs_enabled)
2175                                if (!mark_lock(curr, hlock,
2176                                                LOCK_ENABLED_SOFTIRQS_READ))
2177                                        return 0;
2178                } else {
2179                        if (!mark_lock(curr, hlock,
2180                                        LOCK_ENABLED_HARDIRQS))
2181                                return 0;
2182                        if (curr->softirqs_enabled)
2183                                if (!mark_lock(curr, hlock,
2184                                                LOCK_ENABLED_SOFTIRQS))
2185                                        return 0;
2186                }
2187        }
2188
2189        return 1;
2190}
2191
2192static int separate_irq_context(struct task_struct *curr,
2193                struct held_lock *hlock)
2194{
2195        unsigned int depth = curr->lockdep_depth;
2196
2197        /*
2198         * Keep track of points where we cross into an interrupt context:
2199         */
2200        hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2201                                curr->softirq_context;
2202        if (depth) {
2203                struct held_lock *prev_hlock;
2204
2205                prev_hlock = curr->held_locks + depth-1;
2206                /*
2207                 * If we cross into another context, reset the
2208                 * hash key (this also prevents the checking and the
2209                 * adding of the dependency to 'prev'):
2210                 */
2211                if (prev_hlock->irq_context != hlock->irq_context)
2212                        return 1;
2213        }
2214        return 0;
2215}
2216
2217#else
2218
2219static inline
2220int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2221                enum lock_usage_bit new_bit)
2222{
2223        WARN_ON(1);
2224        return 1;
2225}
2226
2227static inline int mark_irqflags(struct task_struct *curr,
2228                struct held_lock *hlock)
2229{
2230        return 1;
2231}
2232
2233static inline int separate_irq_context(struct task_struct *curr,
2234                struct held_lock *hlock)
2235{
2236        return 0;
2237}
2238
2239#endif
2240
2241/*
2242 * Mark a lock with a usage bit, and validate the state transition:
2243 */
2244static int mark_lock(struct task_struct *curr, struct held_lock *this,
2245                     enum lock_usage_bit new_bit)
2246{
2247        unsigned int new_mask = 1 << new_bit, ret = 1;
2248
2249        /*
2250         * If already set then do not dirty the cacheline,
2251         * nor do any checks:
2252         */
2253        if (likely(this->class->usage_mask & new_mask))
2254                return 1;
2255
2256        if (!graph_lock())
2257                return 0;
2258        /*
2259         * Make sure we didn't race:
2260         */
2261        if (unlikely(this->class->usage_mask & new_mask)) {
2262                graph_unlock();
2263                return 1;
2264        }
2265
2266        this->class->usage_mask |= new_mask;
2267
2268        if (!save_trace(this->class->usage_traces + new_bit))
2269                return 0;
2270
2271        switch (new_bit) {
2272        case LOCK_USED_IN_HARDIRQ:
2273        case LOCK_USED_IN_SOFTIRQ:
2274        case LOCK_USED_IN_HARDIRQ_READ:
2275        case LOCK_USED_IN_SOFTIRQ_READ:
2276        case LOCK_ENABLED_HARDIRQS:
2277        case LOCK_ENABLED_SOFTIRQS:
2278        case LOCK_ENABLED_HARDIRQS_READ:
2279        case LOCK_ENABLED_SOFTIRQS_READ:
2280                ret = mark_lock_irq(curr, this, new_bit);
2281                if (!ret)
2282                        return 0;
2283                break;
2284        case LOCK_USED:
2285                /*
2286                 * Add it to the global list of classes:
2287                 */
2288                list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
2289                debug_atomic_dec(&nr_unused_locks);
2290                break;
2291        default:
2292                if (!debug_locks_off_graph_unlock())
2293                        return 0;
2294                WARN_ON(1);
2295                return 0;
2296        }
2297
2298        graph_unlock();
2299
2300        /*
2301         * We must printk outside of the graph_lock:
2302         */
2303        if (ret == 2) {
2304                printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2305                print_lock(this);
2306                print_irqtrace_events(curr);
2307                dump_stack();
2308        }
2309
2310        return ret;
2311}
2312
2313/*
2314 * Initialize a lock instance's lock-class mapping info:
2315 */
2316void lockdep_init_map(struct lockdep_map *lock, const char *name,
2317                      struct lock_class_key *key, int subclass)
2318{
2319        if (unlikely(!debug_locks))
2320                return;
2321
2322        if (DEBUG_LOCKS_WARN_ON(!key))
2323                return;
2324        if (DEBUG_LOCKS_WARN_ON(!name))
2325                return;
2326        /*
2327         * Sanity check, the lock-class key must be persistent:
2328         */
2329        if (!static_obj(key)) {
2330                printk("BUG: key %p not in .data!\n", key);
2331                DEBUG_LOCKS_WARN_ON(1);
2332                return;
2333        }
2334        lock->name = name;
2335        lock->key = key;
2336        lock->class_cache = NULL;
2337#ifdef CONFIG_LOCK_STAT
2338        lock->cpu = raw_smp_processor_id();
2339#endif
2340        if (subclass)
2341                register_lock_class(lock, subclass, 1);
2342}
2343
2344EXPORT_SYMBOL_GPL(lockdep_init_map);
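
/*
 * Illustrative usage sketch (hypothetical names): a subsystem that
 * implements its own locking primitive registers it with lockdep
 * roughly like this.  The lock_class_key must be static so that the
 * class key stays valid for as long as the code using it.
 */
#if 0
struct example_lock {
        raw_spinlock_t          raw;
        struct lockdep_map      dep_map;
};

static struct lock_class_key example_lock_class_key;

static void example_lock_init(struct example_lock *l, const char *name)
{
        /* ... initialize the underlying raw lock ... */
        lockdep_init_map(&l->dep_map, name, &example_lock_class_key, 0);
}
#endif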
2345
2346/*
2347 * This gets called for every mutex_lock*()/spin_lock*() operation.
2348 * We maintain the dependency maps and validate the locking attempt:
2349 */
2350static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2351                          int trylock, int read, int check, int hardirqs_off,
2352                          unsigned long ip)
2353{
2354        struct task_struct *curr = current;
2355        struct lock_class *class = NULL;
2356        struct held_lock *hlock;
2357        unsigned int depth, id;
2358        int chain_head = 0;
2359        u64 chain_key;
2360
2361        if (!prove_locking)
2362                check = 1;
2363
2364        if (unlikely(!debug_locks))
2365                return 0;
2366
2367        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2368                return 0;
2369
2370        if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
2371                debug_locks_off();
2372                printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
2373                printk("turning off the locking correctness validator.\n");
2374                return 0;
2375        }
2376
2377        if (!subclass)
2378                class = lock->class_cache;
2379        /*
2380         * Not cached yet or subclass?
2381         */
2382        if (unlikely(!class)) {
2383                class = register_lock_class(lock, subclass, 0);
2384                if (!class)
2385                        return 0;
2386        }
2387        debug_atomic_inc((atomic_t *)&class->ops);
2388        if (very_verbose(class)) {
2389                printk("\nacquire class [%p] %s", class->key, class->name);
2390                if (class->name_version > 1)
2391                        printk("#%d", class->name_version);
2392                printk("\n");
2393                dump_stack();
2394        }
2395
2396        /*
2397         * Add the lock to the list of currently held locks.
2398         * (we don't increase the depth just yet, not until the
2399         * dependency checks are done)
2400         */
2401        depth = curr->lockdep_depth;
2402        if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2403                return 0;
2404
2405        hlock = curr->held_locks + depth;
2406
2407        hlock->class = class;
2408        hlock->acquire_ip = ip;
2409        hlock->instance = lock;
2410        hlock->trylock = trylock;
2411        hlock->read = read;
2412        hlock->check = check;
2413        hlock->hardirqs_off = hardirqs_off;
2414#ifdef CONFIG_LOCK_STAT
2415        hlock->waittime_stamp = 0;
2416        hlock->holdtime_stamp = sched_clock();
2417#endif
2418
2419        if (check == 2 && !mark_irqflags(curr, hlock))
2420                return 0;
2421
2422        /* mark it as used: */
2423        if (!mark_lock(curr, hlock, LOCK_USED))
2424                return 0;
2425
2426        /*
2427         * Calculate the chain hash: it's the combined hash of all the
2428         * lock keys along the dependency chain. We save the hash value
2429         * at every step so that we can get the current hash easily
2430         * after unlock. The chain hash is then used to cache dependency
2431         * results.
2432         *
2433         * The 'key ID' (the class index) is the most compact key value,
2434         * so it is used to drive the hash instead of class->key.
2435         */
2436        id = class - lock_classes;
2437        if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2438                return 0;
2439
2440        chain_key = curr->curr_chain_key;
2441        if (!depth) {
2442                if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2443                        return 0;
2444                chain_head = 1;
2445        }
2446
2447        hlock->prev_chain_key = chain_key;
2448        if (separate_irq_context(curr, hlock)) {
2449                chain_key = 0;
2450                chain_head = 1;
2451        }
2452        chain_key = iterate_chain_key(chain_key, id);
2453
2454        if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
2455                return 0;
2456
2457        curr->curr_chain_key = chain_key;
2458        curr->lockdep_depth++;
2459        check_chain_key(curr);
2460#ifdef CONFIG_DEBUG_LOCKDEP
2461        if (unlikely(!debug_locks))
2462                return 0;
2463#endif
2464        if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2465                debug_locks_off();
2466                printk("BUG: MAX_LOCK_DEPTH too low!\n");
2467                printk("turning off the locking correctness validator.\n");
2468                return 0;
2469        }
2470
2471        if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2472                max_lockdep_depth = curr->lockdep_depth;
2473
2474        return 1;
2475}
2476
2477static int
2478print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2479                           unsigned long ip)
2480{
2481        if (!debug_locks_off())
2482                return 0;
2483        if (debug_locks_silent)
2484                return 0;
2485
2486        printk("\n=====================================\n");
2487        printk(  "[ BUG: bad unlock balance detected! ]\n");
2488        printk(  "-------------------------------------\n");
2489        printk("%s/%d is trying to release lock (",
2490                curr->comm, task_pid_nr(curr));
2491        print_lockdep_cache(lock);
2492        printk(") at:\n");
2493        print_ip_sym(ip);
2494        printk("but there are no more locks to release!\n");
2495        printk("\nother info that might help us debug this:\n");
2496        lockdep_print_held_locks(curr);
2497
2498        printk("\nstack backtrace:\n");
2499        dump_stack();
2500
2501        return 0;
2502}
2503
2504/*
2505 * Common debugging checks for both nested and non-nested unlock:
2506 */
2507static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2508                        unsigned long ip)
2509{
2510        if (unlikely(!debug_locks))
2511                return 0;
2512        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2513                return 0;
2514
2515        if (curr->lockdep_depth <= 0)
2516                return print_unlock_inbalance_bug(curr, lock, ip);
2517
2518        return 1;
2519}
2520
2521/*
2522 * Remove the lock from the list of currently held locks in a
2523 * potentially non-nested (out of order) manner. This is a
2524 * relatively rare operation, as all the unlock APIs default
2525 * to nested mode (which uses lock_release()):
2526 */
2527static int
2528lock_release_non_nested(struct task_struct *curr,
2529                        struct lockdep_map *lock, unsigned long ip)
2530{
2531        struct held_lock *hlock, *prev_hlock;
2532        unsigned int depth;
2533        int i;
2534
2535        /*
2536         * Check whether the lock exists in the current stack
2537         * of held locks:
2538         */
2539        depth = curr->lockdep_depth;
2540        if (DEBUG_LOCKS_WARN_ON(!depth))
2541                return 0;
2542
2543        prev_hlock = NULL;
2544        for (i = depth-1; i >= 0; i--) {
2545                hlock = curr->held_locks + i;
2546                /*
2547                 * We must not cross into another context:
2548                 */
2549                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2550                        break;
2551                if (hlock->instance == lock)
2552                        goto found_it;
2553                prev_hlock = hlock;
2554        }
2555        return print_unlock_inbalance_bug(curr, lock, ip);
2556
2557found_it:
2558        lock_release_holdtime(hlock);
2559
2560        /*
2561         * We have the right lock to unlock, 'hlock' points to it.
2562         * Now we remove it from the stack, and add back the other
2563         * entries (if any), recalculating the hash along the way:
2564         */
2565        curr->lockdep_depth = i;
2566        curr->curr_chain_key = hlock->prev_chain_key;
2567
2568        for (i++; i < depth; i++) {
2569                hlock = curr->held_locks + i;
2570                if (!__lock_acquire(hlock->instance,
2571                        hlock->class->subclass, hlock->trylock,
2572                                hlock->read, hlock->check, hlock->hardirqs_off,
2573                                hlock->acquire_ip))
2574                        return 0;
2575        }
2576
2577        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
2578                return 0;
2579        return 1;
2580}
2581
2582/*
2583 * Remove the lock from the list of currently held locks - this gets
2584 * called on mutex_unlock()/spin_unlock*() (or on a failed
2585 * mutex_lock_interruptible()). This is done for unlocks that nest
2586 * perfectly. (i.e. the current top of the lock-stack is unlocked)
2587 */
2588static int lock_release_nested(struct task_struct *curr,
2589                               struct lockdep_map *lock, unsigned long ip)
2590{
2591        struct held_lock *hlock;
2592        unsigned int depth;
2593
2594        /*
2595         * Pop off the top of the lock stack:
2596         */
2597        depth = curr->lockdep_depth - 1;
2598        hlock = curr->held_locks + depth;
2599
2600        /*
2601         * Is the unlock non-nested:
2602         */
2603        if (hlock->instance != lock)
2604                return lock_release_non_nested(curr, lock, ip);
2605        curr->lockdep_depth--;
2606
2607        if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
2608                return 0;
2609
2610        curr->curr_chain_key = hlock->prev_chain_key;
2611
2612        lock_release_holdtime(hlock);
2613
2614#ifdef CONFIG_DEBUG_LOCKDEP
2615        hlock->prev_chain_key = 0;
2616        hlock->class = NULL;
2617        hlock->acquire_ip = 0;
2618        hlock->irq_context = 0;
2619#endif
2620        return 1;
2621}
2622
2623/*
2624 * Remove the lock from the list of currently held locks - this gets
2625 * called on mutex_unlock()/spin_unlock*() (or on a failed
2626 * mutex_lock_interruptible()). Handles both perfectly nested unlocks
2627 * (the current top of the lock-stack) and out-of-order ones.
2628 */
2629static void
2630__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2631{
2632        struct task_struct *curr = current;
2633
2634        if (!check_unlock(curr, lock, ip))
2635                return;
2636
2637        if (nested) {
2638                if (!lock_release_nested(curr, lock, ip))
2639                        return;
2640        } else {
2641                if (!lock_release_non_nested(curr, lock, ip))
2642                        return;
2643        }
2644
2645        check_chain_key(curr);
2646}
2647
2648/*
2649 * Check whether we follow the irq-flags state precisely:
2650 */
2651static void check_flags(unsigned long flags)
2652{
2653#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
2654        if (!debug_locks)
2655                return;
2656
2657        if (irqs_disabled_flags(flags)) {
2658                if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
2659                        printk("possible reason: unannotated irqs-off.\n");
2660                }
2661        } else {
2662                if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
2663                        printk("possible reason: unannotated irqs-on.\n");
2664                }
2665        }
2666
2667        /*
2668         * We don't accurately track softirq state in e.g.
2669         * hardirq contexts (such as on 4KSTACKS), so only
2670         * check if not in hardirq contexts:
2671         */
2672        if (!hardirq_count()) {
2673                if (softirq_count())
2674                        DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
2675                else
2676                        DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
2677        }
2678
2679        if (!debug_locks)
2680                print_irqtrace_events(current);
2681#endif
2682}
2683
2684/*
2685 * We are not always called with irqs disabled - do that here,
2686 * and also avoid lockdep recursion:
2687 */
2688void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2689                  int trylock, int read, int check, unsigned long ip)
2690{
2691        unsigned long flags;
2692
2693        if (unlikely(!lock_stat && !prove_locking))
2694                return;
2695
2696        if (unlikely(current->lockdep_recursion))
2697                return;
2698
2699        raw_local_irq_save(flags);
2700        check_flags(flags);
2701
2702        current->lockdep_recursion = 1;
2703        __lock_acquire(lock, subclass, trylock, read, check,
2704                       irqs_disabled_flags(flags), ip);
2705        current->lockdep_recursion = 0;
2706        raw_local_irq_restore(flags);
2707}
2708
2709EXPORT_SYMBOL_GPL(lock_acquire);
2710
2711void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2712{
2713        unsigned long flags;
2714
2715        if (unlikely(!lock_stat && !prove_locking))
2716                return;
2717
2718        if (unlikely(current->lockdep_recursion))
2719                return;
2720
2721        raw_local_irq_save(flags);
2722        check_flags(flags);
2723        current->lockdep_recursion = 1;
2724        __lock_release(lock, nested, ip);
2725        current->lockdep_recursion = 0;
2726        raw_local_irq_restore(flags);
2727}
2728
2729EXPORT_SYMBOL_GPL(lock_release);
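
/*
 * Illustrative sketch (hypothetical helpers, continuing the
 * example_lock sketch near lockdep_init_map()): a locking primitive
 * annotates its acquire and release paths by bracketing the low-level
 * operations with lock_acquire()/lock_release().
 */
#if 0
static void example_lock_lock(struct example_lock *l)
{
        /* subclass 0, not a trylock, not a read lock, full checks: */
        lock_acquire(&l->dep_map, 0, 0, 0, 2, _RET_IP_);
        __raw_spin_lock(&l->raw);
}

static void example_lock_unlock(struct example_lock *l)
{
        lock_release(&l->dep_map, 1, _RET_IP_);
        __raw_spin_unlock(&l->raw);
}
#endif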
2730
2731#ifdef CONFIG_LOCK_STAT
2732static int
2733print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
2734                           unsigned long ip)
2735{
2736        if (!debug_locks_off())
2737                return 0;
2738        if (debug_locks_silent)
2739                return 0;
2740
2741        printk("\n=================================\n");
2742        printk(  "[ BUG: bad contention detected! ]\n");
2743        printk(  "---------------------------------\n");
2744        printk("%s/%d is trying to contend lock (",
2745                curr->comm, task_pid_nr(curr));
2746        print_lockdep_cache(lock);
2747        printk(") at:\n");
2748        print_ip_sym(ip);
2749        printk("but there are no locks held!\n");
2750        printk("\nother info that might help us debug this:\n");
2751        lockdep_print_held_locks(curr);
2752
2753        printk("\nstack backtrace:\n");
2754        dump_stack();
2755
2756        return 0;
2757}
2758
2759static void
2760__lock_contended(struct lockdep_map *lock, unsigned long ip)
2761{
2762        struct task_struct *curr = current;
2763        struct held_lock *hlock, *prev_hlock;
2764        struct lock_class_stats *stats;
2765        unsigned int depth;
2766        int i, point;
2767
2768        depth = curr->lockdep_depth;
2769        if (DEBUG_LOCKS_WARN_ON(!depth))
2770                return;
2771
2772        prev_hlock = NULL;
2773        for (i = depth-1; i >= 0; i--) {
2774                hlock = curr->held_locks + i;
2775                /*
2776                 * We must not cross into another context:
2777                 */
2778                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2779                        break;
2780                if (hlock->instance == lock)
2781                        goto found_it;
2782                prev_hlock = hlock;
2783        }
2784        print_lock_contention_bug(curr, lock, ip);
2785        return;
2786
2787found_it:
2788        hlock->waittime_stamp = sched_clock();
2789
2790        point = lock_contention_point(hlock->class, ip);
2791
2792        stats = get_lock_stats(hlock->class);
2793        if (point < ARRAY_SIZE(stats->contention_point))
2794                stats->contention_point[point]++;
2795        if (lock->cpu != smp_processor_id())
2796                stats->bounces[bounce_contended + !!hlock->read]++;
2797        put_lock_stats(stats);
2798}
2799
2800static void
2801__lock_acquired(struct lockdep_map *lock)
2802{
2803        struct task_struct *curr = current;
2804        struct held_lock *hlock, *prev_hlock;
2805        struct lock_class_stats *stats;
2806        unsigned int depth;
2807        u64 now;
2808        s64 waittime = 0;
2809        int i, cpu;
2810
2811        depth = curr->lockdep_depth;
2812        if (DEBUG_LOCKS_WARN_ON(!depth))
2813                return;
2814
2815        prev_hlock = NULL;
2816        for (i = depth-1; i >= 0; i--) {
2817                hlock = curr->held_locks + i;
2818                /*
2819                 * We must not cross into another context:
2820                 */
2821                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2822                        break;
2823                if (hlock->instance == lock)
2824                        goto found_it;
2825                prev_hlock = hlock;
2826        }
2827        print_lock_contention_bug(curr, lock, _RET_IP_);
2828        return;
2829
2830found_it:
2831        cpu = smp_processor_id();
2832        if (hlock->waittime_stamp) {
2833                now = sched_clock();
2834                waittime = now - hlock->waittime_stamp;
2835                hlock->holdtime_stamp = now;
2836        }
2837
2838        stats = get_lock_stats(hlock->class);
2839        if (waittime) {
2840                if (hlock->read)
2841                        lock_time_inc(&stats->read_waittime, waittime);
2842                else
2843                        lock_time_inc(&stats->write_waittime, waittime);
2844        }
2845        if (lock->cpu != cpu)
2846                stats->bounces[bounce_acquired + !!hlock->read]++;
2847        put_lock_stats(stats);
2848
2849        lock->cpu = cpu;
2850}
2851
2852void lock_contended(struct lockdep_map *lock, unsigned long ip)
2853{
2854        unsigned long flags;
2855
2856        if (unlikely(!lock_stat))
2857                return;
2858
2859        if (unlikely(current->lockdep_recursion))
2860                return;
2861
2862        raw_local_irq_save(flags);
2863        check_flags(flags);
2864        current->lockdep_recursion = 1;
2865        __lock_contended(lock, ip);
2866        current->lockdep_recursion = 0;
2867        raw_local_irq_restore(flags);
2868}
2869EXPORT_SYMBOL_GPL(lock_contended);
2870
2871void lock_acquired(struct lockdep_map *lock)
2872{
2873        unsigned long flags;
2874
2875        if (unlikely(!lock_stat))
2876                return;
2877
2878        if (unlikely(current->lockdep_recursion))
2879                return;
2880
2881        raw_local_irq_save(flags);
2882        check_flags(flags);
2883        current->lockdep_recursion = 1;
2884        __lock_acquired(lock);
2885        current->lockdep_recursion = 0;
2886        raw_local_irq_restore(flags);
2887}
2888EXPORT_SYMBOL_GPL(lock_acquired);
2889#endif
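
/*
 * Illustrative sketch (hypothetical structure with an embedded
 * lockdep_map): a sleeping lock's contended slowpath feeds the lock
 * statistics above by calling lock_contended() before blocking and
 * lock_acquired() once the lock has finally been taken.
 */
#if 0
struct example_mutex {
        /* ... owner, wait list, ... */
        struct lockdep_map      dep_map;
};

static void example_mutex_lock_slowpath(struct example_mutex *m)
{
        lock_contended(&m->dep_map, _RET_IP_);
        /* ... block until the current owner releases the lock ... */
        lock_acquired(&m->dep_map);
}
#endif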
2890
2891/*
2892 * Used by the testsuite, sanitize the validator state
2893 * after a simulated failure:
2894 */
2895
2896void lockdep_reset(void)
2897{
2898        unsigned long flags;
2899        int i;
2900
2901        raw_local_irq_save(flags);
2902        current->curr_chain_key = 0;
2903        current->lockdep_depth = 0;
2904        current->lockdep_recursion = 0;
2905        memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
2906        nr_hardirq_chains = 0;
2907        nr_softirq_chains = 0;
2908        nr_process_chains = 0;
2909        debug_locks = 1;
2910        for (i = 0; i < CHAINHASH_SIZE; i++)
2911                INIT_LIST_HEAD(chainhash_table + i);
2912        raw_local_irq_restore(flags);
2913}
2914
2915static void zap_class(struct lock_class *class)
2916{
2917        int i;
2918
2919        /*
2920         * Remove all dependencies this lock is
2921         * involved in:
2922         */
2923        for (i = 0; i < nr_list_entries; i++) {
2924                if (list_entries[i].class == class)
2925                        list_del_rcu(&list_entries[i].entry);
2926        }
2927        /*
2928         * Unhash the class and remove it from the all_lock_classes list:
2929         */
2930        list_del_rcu(&class->hash_entry);
2931        list_del_rcu(&class->lock_entry);
2932
2933}
2934
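/*
 * Does the object at <addr> live inside the memory range
 * [start, start+size)?
 */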
2935static inline int within(const void *addr, void *start, unsigned long size)
2936{
2937        return addr >= start && addr < start + size;
2938}
2939
2940void lockdep_free_key_range(void *start, unsigned long size)
2941{
2942        struct lock_class *class, *next;
2943        struct list_head *head;
2944        unsigned long flags;
2945        int i;
2946        int locked;
2947
2948        raw_local_irq_save(flags);
2949        locked = graph_lock();
2950
2951        /*
2952         * Unhash all classes that were created by this module:
2953         */
2954        for (i = 0; i < CLASSHASH_SIZE; i++) {
2955                head = classhash_table + i;
2956                if (list_empty(head))
2957                        continue;
2958                list_for_each_entry_safe(class, next, head, hash_entry) {
2959                        if (within(class->key, start, size))
2960                                zap_class(class);
2961                        else if (within(class->name, start, size))
2962                                zap_class(class);
2963                }
2964        }
2965
2966        if (locked)
2967                graph_unlock();
2968        raw_local_irq_restore(flags);
2969}
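
/*
 * Illustrative sketch (hypothetical caller): code that unmaps or frees
 * a text/data range - most notably the module unload path - is expected
 * to call lockdep_free_key_range() on that range, so that classes whose
 * key or name lived there are zapped before the memory is reused.
 */
#if 0
static void example_unload_range(void *base, unsigned long size)
{
        lockdep_free_key_range(base, size);
        /* ... actually free or unmap the range ... */
}
#endif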
2970
2971void lockdep_reset_lock(struct lockdep_map *lock)
2972{
2973        struct lock_class *class, *next;
2974        struct list_head *head;
2975        unsigned long flags;
2976        int i, j;
2977        int locked;
2978
2979        raw_local_irq_save(flags);
2980
2981        /*
2982         * Remove all classes this lock might have:
2983         */
2984        for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
2985                /*
2986                 * If the class exists we look it up and zap it:
2987                 */
2988                class = look_up_lock_class(lock, j);
2989                if (class)
2990                        zap_class(class);
2991        }
2992        /*
2993         * Debug check: in the end all mapped classes should
2994         * be gone.
2995         */
2996        locked = graph_lock();
2997        for (i = 0; i < CLASSHASH_SIZE; i++) {
2998                head = classhash_table + i;
2999                if (list_empty(head))
3000                        continue;
3001                list_for_each_entry_safe(class, next, head, hash_entry) {
3002                        if (unlikely(class == lock->class_cache)) {
3003                                if (debug_locks_off_graph_unlock())
3004                                        WARN_ON(1);
3005                                goto out_restore;
3006                        }
3007                }
3008        }
3009        if (locked)
3010                graph_unlock();
3011
3012out_restore:
3013        raw_local_irq_restore(flags);
3014}
3015
3016void lockdep_init(void)
3017{
3018        int i;
3019
3020        /*
3021         * Some architectures have their own start_kernel()
3022         * code which calls lockdep_init(), while we also
3023         * call lockdep_init() from the start_kernel() itself,
3024         * and we want to initialize the hashes only once:
3025         */
3026        if (lockdep_initialized)
3027                return;
3028
3029        for (i = 0; i < CLASSHASH_SIZE; i++)
3030                INIT_LIST_HEAD(classhash_table + i);
3031
3032        for (i = 0; i < CHAINHASH_SIZE; i++)
3033                INIT_LIST_HEAD(chainhash_table + i);
3034
3035        lockdep_initialized = 1;
3036}
3037
3038void __init lockdep_info(void)
3039{
3040        printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3041
3042        printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
3043        printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
3044        printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
3045        printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
3046        printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
3047        printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
3048        printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
3049
3050        printk(" memory used by lock dependency info: %lu kB\n",
3051                (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3052                sizeof(struct list_head) * CLASSHASH_SIZE +
3053                sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3054                sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3055                sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
3056
3057        printk(" per task-struct memory footprint: %lu bytes\n",
3058                sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3059
3060#ifdef CONFIG_DEBUG_LOCKDEP
3061        if (lockdep_init_error) {
3062                printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
3063                printk("Call stack leading to lockdep invocation was:\n");
3064                print_stack_trace(&lockdep_init_trace, 0);
3065        }
3066#endif
3067}
3068
3069static void
3070print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3071                     const void *mem_to, struct held_lock *hlock)
3072{
3073        if (!debug_locks_off())
3074                return;
3075        if (debug_locks_silent)
3076                return;
3077
3078        printk("\n=========================\n");
3079        printk(  "[ BUG: held lock freed! ]\n");
3080        printk(  "-------------------------\n");
3081        printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
3082                curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
3083        print_lock(hlock);
3084        lockdep_print_held_locks(curr);
3085
3086        printk("\nstack backtrace:\n");
3087        dump_stack();
3088}
3089
3090static inline int not_in_range(const void* mem_from, unsigned long mem_len,
3091                                const void* lock_from, unsigned long lock_len)
3092{
3093        return lock_from + lock_len <= mem_from ||
3094                mem_from + mem_len <= lock_from;
3095}
3096
3097/*
3098 * Called when kernel memory is freed (or unmapped), or if a lock
3099 * is destroyed or reinitialized - this code checks whether there is
3100 * any held lock in the memory range of <from> to <to>:
3101 */
3102void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3103{
3104        struct task_struct *curr = current;
3105        struct held_lock *hlock;
3106        unsigned long flags;
3107        int i;
3108
3109        if (unlikely(!debug_locks))
3110                return;
3111
3112        local_irq_save(flags);
3113        for (i = 0; i < curr->lockdep_depth; i++) {
3114                hlock = curr->held_locks + i;
3115
3116                if (not_in_range(mem_from, mem_len, hlock->instance,
3117                                        sizeof(*hlock->instance)))
3118                        continue;
3119
3120                print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
3121                break;
3122        }
3123        local_irq_restore(flags);
3124}
3125EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
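
/*
 * Illustrative sketch (hypothetical free routine): allocators call
 * debug_check_no_locks_freed() on a range they are about to release,
 * so that a lock still held inside that range is reported before the
 * memory gets reused.
 */
#if 0
static void example_free_object(void *addr, unsigned long size)
{
        debug_check_no_locks_freed(addr, size);
        /* ... hand the memory back to the allocator ... */
}
#endif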
3126
3127static void print_held_locks_bug(struct task_struct *curr)
3128{
3129        if (!debug_locks_off())
3130                return;
3131        if (debug_locks_silent)
3132                return;
3133
3134        printk("\n=====================================\n");
3135        printk(  "[ BUG: lock held at task exit time! ]\n");
3136        printk(  "-------------------------------------\n");
3137        printk("%s/%d is exiting with locks still held!\n",
3138                curr->comm, task_pid_nr(curr));
3139        lockdep_print_held_locks(curr);
3140
3141        printk("\nstack backtrace:\n");
3142        dump_stack();
3143}
3144
3145void debug_check_no_locks_held(struct task_struct *task)
3146{
3147        if (unlikely(task->lockdep_depth > 0))
3148                print_held_locks_bug(task);
3149}
3150
3151void debug_show_all_locks(void)
3152{
3153        struct task_struct *g, *p;
3154        int count = 10;
3155        int unlock = 1;
3156
3157        if (unlikely(!debug_locks)) {
3158                printk("INFO: lockdep is turned off.\n");
3159                return;
3160        }
3161        printk("\nShowing all locks held in the system:\n");
3162
3163        /*
3164         * Here we try hard to get the tasklist_lock: if we cannot get
3165         * it within 2 seconds we ignore it (but keep retrying while
3166         * iterating). This enables a debug printout even if a
3167         * tasklist_lock-holding task deadlocks or crashes.
3168         */
3169retry:
3170        if (!read_trylock(&tasklist_lock)) {
3171                if (count == 10)
3172                        printk("hm, tasklist_lock locked, retrying... ");
3173                if (count) {
3174                        count--;
3175                        printk(" #%d", 10-count);
3176                        mdelay(200);
3177                        goto retry;
3178                }
3179                printk(" ignoring it.\n");
3180                unlock = 0;
3181        }
3182        if (count != 10)
3183                printk(" locked it.\n");
3184
3185        do_each_thread(g, p) {
3186                /*
3187                 * It's not reliable to print a task's held locks
3188                 * if it's not sleeping (or if it's not the current
3189                 * task):
3190                 */
3191                if (p->state == TASK_RUNNING && p != current)
3192                        continue;
3193                if (p->lockdep_depth)
3194                        lockdep_print_held_locks(p);
3195                if (!unlock)
3196                        if (read_trylock(&tasklist_lock))
3197                                unlock = 1;
3198        } while_each_thread(g, p);
3199
3200        printk("\n");
3201        printk("=============================================\n\n");
3202
3203        if (unlock)
3204                read_unlock(&tasklist_lock);
3205}
3206
3207EXPORT_SYMBOL_GPL(debug_show_all_locks);
3208
3209void debug_show_held_locks(struct task_struct *task)
3210{
3211        if (unlikely(!debug_locks)) {
3212                printk("INFO: lockdep is turned off.\n");
3213                return;
3214        }
3215        lockdep_print_held_locks(task);
3216}
3217
3218EXPORT_SYMBOL_GPL(debug_show_held_locks);
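
/*
 * Illustrative sketch (hypothetical debugging hook): facilities such as
 * the magic SysRq handler can simply call the helpers above to dump
 * lockdep's view of a suspect task and of the whole system.
 */
#if 0
static void example_debug_dump(struct task_struct *suspect)
{
        debug_show_held_locks(suspect);
        debug_show_all_locks();
}
#endif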
3219
3220void lockdep_sys_exit(void)
3221{
3222        struct task_struct *curr = current;
3223
3224        if (unlikely(curr->lockdep_depth)) {
3225                if (!debug_locks_off())
3226                        return;
3227                printk("\n================================================\n");
3228                printk(  "[ BUG: lock held when returning to user space! ]\n");
3229                printk(  "------------------------------------------------\n");
3230                printk("%s/%d is leaving the kernel with locks still held!\n",
3231                                curr->comm, curr->pid);
3232                lockdep_print_held_locks(curr);
3233        }
3234}
3235