/* linux/lib/debugobjects.c */
   1/*
   2 * Generic infrastructure for lifetime debugging of objects.
   3 *
   4 * Started by Thomas Gleixner
   5 *
   6 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
   7 *
   8 * For licensing details see kernel-base/COPYING
   9 */
  10
  11#define pr_fmt(fmt) "ODEBUG: " fmt
  12
  13#include <linux/debugobjects.h>
  14#include <linux/interrupt.h>
  15#include <linux/sched.h>
  16#include <linux/sched/task_stack.h>
  17#include <linux/seq_file.h>
  18#include <linux/debugfs.h>
  19#include <linux/slab.h>
  20#include <linux/hash.h>
  21#include <linux/kmemleak.h>
  22
/* Hash table geometry: 2^14 buckets, indexed by object chunk address. */
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Boot-time pool size and the refill threshold for fill_pool(). */
#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

/* Objects are hashed by the page-sized chunk their address falls into. */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: the list of tracked objects plus its own lock. */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};
static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static boot-time pool, used until the slab cache (obj_cache) is up. */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool, obj_to_free and all the pool counters below. */
static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
/* The number of objs on the global free list */
static int			obj_nr_tofree;
static struct kmem_cache	*obj_cache;

/* Statistics exposed through debugfs (see debug_stats_show()). */
static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
/* Descriptor used by the self test; its warnings are suppressed. */
static struct debug_obj_descr	*descr_test  __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
/* Handler for the "debug_objects" kernel command line parameter. */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}
  81
/* Handler for the "no_debug_objects" kernel command line parameter. */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
  90
/* Human readable state names, indexed by enum debug_obj_state. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
  99
/*
 * Refill obj_pool up to debug_objects_pool_min_level.
 *
 * First recycles objects parked on the global obj_to_free list (cheap,
 * no allocator involved), then falls back to atomic allocations from
 * obj_cache. Callable from any context; all list manipulation happens
 * under pool_lock with interrupts disabled.
 */
static void fill_pool(void)
{
	/* Atomic context: never sleep, never retry, never warn on failure. */
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new, *obj;
	unsigned long flags;

	if (likely(obj_pool_free >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 */
	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		if (obj_nr_tofree) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			obj_nr_tofree--;
			hlist_add_head(&obj->node, &obj_pool);
			obj_pool_free++;
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	/* Nothing to allocate from before the slab cache is initialized. */
	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < debug_objects_pool_min_level) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		/* Pool objects live indefinitely; don't report them as leaks. */
		kmemleak_ignore(new);
		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		debug_objects_allocated++;
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}
 146
 147/*
 148 * Lookup an object in the hash bucket.
 149 */
 150static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 151{
 152        struct debug_obj *obj;
 153        int cnt = 0;
 154
 155        hlist_for_each_entry(obj, &b->list, node) {
 156                cnt++;
 157                if (obj->object == addr)
 158                        return obj;
 159        }
 160        if (cnt > debug_objects_maxchain)
 161                debug_objects_maxchain = cnt;
 162
 163        return NULL;
 164}
 165
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 *
 * Takes the first object off obj_pool, binds it to @addr/@descr and
 * links it into bucket @b (whose lock the caller holds). Returns NULL
 * when the pool is exhausted.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Update usage counters and high/low watermarks. */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}
 199
/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	/* Lock contended: give up; the work will be rescheduled later. */
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck if pool list it full or not, if not fill pool
	 * list from the global free list
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		obj_pool_free++;
		obj_nr_tofree--;
	}

	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		obj_nr_tofree = 0;
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	/* Actual kmem_cache_free() happens without pool_lock held. */
	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}
 246
 247static bool __free_object(struct debug_obj *obj)
 248{
 249        unsigned long flags;
 250        bool work;
 251
 252        raw_spin_lock_irqsave(&pool_lock, flags);
 253        work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
 254        obj_pool_used--;
 255
 256        if (work) {
 257                obj_nr_tofree++;
 258                hlist_add_head(&obj->node, &obj_to_free);
 259        } else {
 260                obj_pool_free++;
 261                hlist_add_head(&obj->node, &obj_pool);
 262        }
 263        raw_spin_unlock_irqrestore(&pool_lock, flags);
 264        return work;
 265}
 266
 267/*
 268 * Put the object back into the pool and schedule work to free objects
 269 * if necessary.
 270 */
 271static void free_object(struct debug_obj *obj)
 272{
 273        if (__free_object(obj))
 274                schedule_work(&debug_obj_work);
 275}
 276
/*
 * We run out of memory. That means we probably have tons of objects
 * allocated.
 *
 * Drain every hash bucket and return all tracking objects to the pool;
 * the caller has already disabled object debugging.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the whole chain under the bucket lock ... */
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
 304
 305/*
 306 * We use the pfn of the address for the hash. That way we can check
 307 * for freed objects simply by checking the affected bucket.
 308 */
 309static struct debug_bucket *get_bucket(unsigned long addr)
 310{
 311        unsigned long hash;
 312
 313        hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
 314        return &obj_hash[hash];
 315}
 316
/*
 * Emit a warning about @obj, rate limited to 5 over the system's
 * lifetime. Self-test objects (descr == descr_test) never warn so the
 * selftest doesn't spam the log. debug_objects_warnings counts every
 * call, including suppressed ones.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		/* Optional per-type hint, e.g. the timer callback address. */
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
 333
 334/*
 335 * Try to repair the damage, so we have a better chance to get useful
 336 * debug output.
 337 */
 338static bool
 339debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
 340                   void * addr, enum debug_obj_state state)
 341{
 342        if (fixup && fixup(addr, state)) {
 343                debug_objects_fixups++;
 344                return true;
 345        }
 346        return false;
 347}
 348
/*
 * Warn (rate limited to 5) when the caller's on-stack annotation does
 * not match where the object actually lives: stack objects must use
 * debug_object_init_on_stack(), all others debug_object_init().
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}
 371
/*
 * Common worker for debug_object_init() and
 * debug_object_init_on_stack().
 * @onstack: 1 when the caller claims the object lives on a task stack.
 *
 * Starts tracking @addr (allocating a tracking object on first sight)
 * and moves it to ODEBUG_STATE_INIT; warns and invokes fixup_init when
 * an active object is re-initialized.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Top up the object pool before taking any locks. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: object debugging is disabled. */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		/* First sight of this object: verify the stack annotation. */
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* Init of an active object: warn, then run the fixup
		 * callback with the bucket lock dropped. */
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
 421
 422/**
 423 * debug_object_init - debug checks when an object is initialized
 424 * @addr:       address of the object
 425 * @descr:      pointer to an object specific debug description structure
 426 */
 427void debug_object_init(void *addr, struct debug_obj_descr *descr)
 428{
 429        if (!debug_objects_enabled)
 430                return;
 431
 432        __debug_object_init(addr, descr, 0);
 433}
 434EXPORT_SYMBOL_GPL(debug_object_init);
 435
 436/**
 437 * debug_object_init_on_stack - debug checks when an object on stack is
 438 *                              initialized
 439 * @addr:       address of the object
 440 * @descr:      pointer to an object specific debug description structure
 441 */
 442void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
 443{
 444        if (!debug_objects_enabled)
 445                return;
 446
 447        __debug_object_init(addr, descr, 1);
 448}
 449EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
 450
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	/* On-stack stand-in used to warn about untracked objects. */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: warn and run the fixup
			 * callback with the bucket lock dropped. */
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We are here when a static object is activated. We
	 * let the type specific code confirm whether this is
	 * true or not. if true, we just make sure that the
	 * static object is tracked in the object tracker. If
	 * not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		/* Recurse once: the object is now tracked and activates. */
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
 524
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* A non-zero astate means a user-tracked state
			 * transition is pending: deactivating now is a bug. */
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: warn via an on-stack stand-in. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
 572
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Destroying an active object: warn, then run the fixup
		 * callback with the bucket lock dropped. */
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		/* Double destroy. */
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
 619
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Freeing an active object: warn, then run the fixup
		 * callback with the bucket lock dropped. */
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Stop tracking and recycle the tracking object. */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);
 660
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		/* On-stack stand-in used to warn about untracked objects. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	/* Object is tracked: any state satisfies the assertion. */
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
 705
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			/* astate transition only valid from @expect. */
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			/* astate is only meaningful on active objects. */
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		/* Untracked object: warn via an on-stack stand-in. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
 753
 754#ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan the freed range [address, address + size) for tracked objects.
 * Active objects warn and get their fixup_free callback; all others are
 * simply untracked and returned to the pool.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;
	bool work = false;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	/* Number of chunk-sized pieces covering the range, rounded up. */
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* Chunk overlaps the range; the object may not. */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				/* Fixup runs unlocked; the bucket may have
				 * changed meanwhile, so rescan it. */
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				work |= __free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (work)
		schedule_work(&debug_obj_work);
}
 814
 815void debug_check_no_obj_freed(const void *address, unsigned long size)
 816{
 817        if (debug_objects_enabled)
 818                __debug_check_no_obj_freed(address, size);
 819}
 820#endif
 821
 822#ifdef CONFIG_DEBUG_FS
 823
/* Render the statistics for /sys/kernel/debug/debug_objects/stats. */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
 839
/* debugfs ->open: wire the "stats" file to debug_stats_show(). */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}
 844
/* File operations for the debugfs "stats" file. */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
 851
/*
 * Create /sys/kernel/debug/debug_objects/stats.
 * NOTE(review): the NULL checks assume a debugfs vintage that returns
 * NULL on failure (later kernels return ERR_PTR) — confirm against the
 * tree this is built in.
 */
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);
 876
 877#else
 878static inline void debug_objects_init_debugfs(void) { }
 879#endif
 880
 881#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
 882
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* non-zero marks the object as static */
	unsigned long	dummy2[3];
};

/* Descriptor used by the selftest; assigned to descr_test to mute warnings. */
static __initdata struct debug_obj_descr descr_type_test;
 891
 892static bool __init is_static_object(void *addr)
 893{
 894        struct self_test *obj = addr;
 895
 896        return obj->static_init;
 897}
 898
 899/*
 900 * fixup_init is called when:
 901 * - an active object is initialized
 902 */
 903static bool __init fixup_init(void *addr, enum debug_obj_state state)
 904{
 905        struct self_test *obj = addr;
 906
 907        switch (state) {
 908        case ODEBUG_STATE_ACTIVE:
 909                debug_object_deactivate(obj, &descr_type_test);
 910                debug_object_init(obj, &descr_type_test);
 911                return true;
 912        default:
 913                return false;
 914        }
 915}
 916
 917/*
 918 * fixup_activate is called when:
 919 * - an active object is activated
 920 * - an unknown non-static object is activated
 921 */
 922static bool __init fixup_activate(void *addr, enum debug_obj_state state)
 923{
 924        struct self_test *obj = addr;
 925
 926        switch (state) {
 927        case ODEBUG_STATE_NOTAVAILABLE:
 928                return true;
 929        case ODEBUG_STATE_ACTIVE:
 930                debug_object_deactivate(obj, &descr_type_test);
 931                debug_object_activate(obj, &descr_type_test);
 932                return true;
 933
 934        default:
 935                return false;
 936        }
 937}
 938
 939/*
 940 * fixup_destroy is called when:
 941 * - an active object is destroyed
 942 */
 943static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
 944{
 945        struct self_test *obj = addr;
 946
 947        switch (state) {
 948        case ODEBUG_STATE_ACTIVE:
 949                debug_object_deactivate(obj, &descr_type_test);
 950                debug_object_destroy(obj, &descr_type_test);
 951                return true;
 952        default:
 953                return false;
 954        }
 955}
 956
 957/*
 958 * fixup_free is called when:
 959 * - an active object is freed
 960 */
 961static bool __init fixup_free(void *addr, enum debug_obj_state state)
 962{
 963        struct self_test *obj = addr;
 964
 965        switch (state) {
 966        case ODEBUG_STATE_ACTIVE:
 967                debug_object_deactivate(obj, &descr_type_test);
 968                debug_object_free(obj, &descr_type_test);
 969                return true;
 970        default:
 971                return false;
 972        }
 973}
 974
/*
 * Verify one selftest step: look up @addr in its hash bucket and check
 * that the tracked state and the global fixup/warning counters match
 * the expected values. Any mismatch disables the whole tracker so the
 * broken infrastructure cannot produce further bogus reports.
 * Returns 0 on success, -EINVAL on mismatch.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* ODEBUG_STATE_NONE means the object must not be tracked at all */
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	/* A failed selftest step disables debugobjects entirely */
	if (res)
		debug_objects_enabled = 0;
	return res;
}
1014
/* Descriptor wiring the selftest fixup callbacks into the core. */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};
1023
/* The single object all selftest scenarios operate on; starts non-static. */
static __initdata struct self_test obj = { .static_init = 0 };
1025
/*
 * Exercise the tracker with a scripted sequence of API calls, checking
 * state and the fixup/warning counters after every step. Runs with
 * interrupts disabled so the global counters cannot change underneath;
 * the counters are restored at the end so the selftest does not skew
 * the debugfs statistics.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	/* Normal lifecycle: init then activate, no fixups expected */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Double activation must trigger one fixup and one warning */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	/* Operations on a destroyed object only warn; state must not change */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activating an untracked static object must succeed silently */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Init of an active object is fixed up and warned about */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Freeing the memory of an active object is fixed up and warned */
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	/* Restore the counters so the selftest does not skew the stats */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
/* Selftest not configured: nothing to run. */
static inline void debug_objects_selftest(void) { }
#endif
1099
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
1115
/*
 * Convert the statically allocated objects to dynamic ones:
 * preallocate a full replacement pool from obj_cache, swap it in for
 * the static pool, then rewrite every active hash-bucket entry to a
 * dynamically allocated copy. Returns 0 on success, -ENOMEM if the
 * replacement pool could not be fully allocated (statics stay in use).
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	/* Allocate the whole replacement pool up front */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		/* Tracker objects live forever; don't report them as leaks */
		kmemleak_ignore(obj);
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			/* Take a fresh object from the pool head */
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	/* Partial allocation failed: release what we got, keep the statics */
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
1173
1174/*
1175 * Called after the kmem_caches are functional to setup a dedicated
1176 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1177 * prevents that the debug code is called on kmem_cache_free() for the
1178 * debug tracker objects to avoid recursive calls.
1179 */
1180void __init debug_objects_mem_init(void)
1181{
1182        if (!debug_objects_enabled)
1183                return;
1184
1185        obj_cache = kmem_cache_create("debug_objects_cache",
1186                                      sizeof (struct debug_obj), 0,
1187                                      SLAB_DEBUG_OBJECTS, NULL);
1188
1189        if (!obj_cache || debug_objects_replace_static_objects()) {
1190                debug_objects_enabled = 0;
1191                kmem_cache_destroy(obj_cache);
1192                pr_warn("out of memory.\n");
1193        } else
1194                debug_objects_selftest();
1195
1196        /*
1197         * Increase the thresholds for allocating and freeing objects
1198         * according to the number of possible CPUs available in the system.
1199         */
1200        debug_objects_pool_size += num_possible_cpus() * 32;
1201        debug_objects_pool_min_level += num_possible_cpus() * 4;
1202}
1203