linux/lib/debugobjects.c
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licencing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS        14
#define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE        1024
#define ODEBUG_POOL_MIN_LEVEL   256
#define ODEBUG_POOL_PERCPU_SIZE 64
#define ODEBUG_BATCH_SIZE       16

#define ODEBUG_CHUNK_SHIFT      PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE       (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK       (~(ODEBUG_CHUNK_SIZE - 1))

/*
 * The freeing of debug objects via the workqueue is limited to a maximum
 * frequency of 10Hz and at most 1024 objects per freeing operation, so it
 * frees at most 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX    1024
#define ODEBUG_FREE_WORK_DELAY  DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
        struct hlist_head       list;
        raw_spinlock_t          lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
        struct hlist_head       free_objs;
        int                     obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket      obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free does not
 * include objects in the percpu free pools and thus under-counts the
 * number of free objects. Similarly, obj_pool_used over-counts by
 * including them. Adjustments are made in debug_stats_show(). Both
 * obj_pool_min_free and obj_pool_max_used can be off.
 */
static int                      obj_pool_min_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_used;
static int                      obj_pool_max_used;
static bool                     obj_freeing;
/* The number of objs on the global free list */
static int                      obj_nr_tofree;

static int                      debug_objects_maxchain __read_mostly;
static int __maybe_unused       debug_objects_maxchecked __read_mostly;
static int                      debug_objects_fixups __read_mostly;
static int                      debug_objects_warnings __read_mostly;
static int                      debug_objects_enabled __read_mostly
                                = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int                      debug_objects_pool_size __read_mostly
                                = ODEBUG_POOL_SIZE;
static int                      debug_objects_pool_min_level __read_mostly
                                = ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache        *obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int                      debug_objects_allocated;
static int                      debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
        debug_objects_enabled = 1;
        return 0;
}

static int __init disable_object_debug(char *str)
{
        debug_objects_enabled = 0;
        return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
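
/*
 * For example, whether object debugging starts out enabled can be forced
 * on the kernel command line regardless of the Kconfig default:
 *
 *     debug_objects        boot with object debugging enabled
 *     no_debug_objects     boot with object debugging disabled
 */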

static const char *obj_states[ODEBUG_STATE_MAX] = {
        [ODEBUG_STATE_NONE]             = "none",
        [ODEBUG_STATE_INIT]             = "initialized",
        [ODEBUG_STATE_INACTIVE]         = "inactive",
        [ODEBUG_STATE_ACTIVE]           = "active",
        [ODEBUG_STATE_DESTROYED]        = "destroyed",
        [ODEBUG_STATE_NOTAVAILABLE]     = "not available",
};

static void fill_pool(void)
{
        gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
        struct debug_obj *obj;
        unsigned long flags;

        if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
                return;

        /*
         * Reuse objs from the global free list; they will be reinitialized
         * when allocating.
         *
         * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
         * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
         * sections.
         */
        while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
                raw_spin_lock_irqsave(&pool_lock, flags);
                /*
                 * Recheck with the lock held as the worker thread might have
                 * won the race and freed the global free list already.
                 */
                while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
                        obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                        hlist_del(&obj->node);
                        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
                        hlist_add_head(&obj->node, &obj_pool);
                        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }

        if (unlikely(!obj_cache))
                return;

        while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
                struct debug_obj *new[ODEBUG_BATCH_SIZE];
                int cnt;

                for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
                        new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
                        if (!new[cnt])
                                break;
                }
                if (!cnt)
                        return;

                raw_spin_lock_irqsave(&pool_lock, flags);
                while (cnt) {
                        hlist_add_head(&new[--cnt]->node, &obj_pool);
                        debug_objects_allocated++;
                        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
        struct debug_obj *obj;
        int cnt = 0;

        hlist_for_each_entry(obj, &b->list, node) {
                cnt++;
                if (obj->object == addr)
                        return obj;
        }
        if (cnt > debug_objects_maxchain)
                debug_objects_maxchain = cnt;

        return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
        struct debug_obj *obj = NULL;

        if (list->first) {
                obj = hlist_entry(list->first, typeof(*obj), node);
                hlist_del(&obj->node);
        }

        return obj;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
        struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
        struct debug_obj *obj;

        if (likely(obj_cache)) {
                obj = __alloc_object(&percpu_pool->free_objs);
                if (obj) {
                        percpu_pool->obj_free--;
                        goto init_obj;
                }
        }

        raw_spin_lock(&pool_lock);
        obj = __alloc_object(&obj_pool);
        if (obj) {
                obj_pool_used++;
                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

                /*
                 * Looking ahead, allocate one batch of debug objects and
                 * put them into the percpu free pool.
                 */
                if (likely(obj_cache)) {
                        int i;

                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                                struct debug_obj *obj2;

                                obj2 = __alloc_object(&obj_pool);
                                if (!obj2)
                                        break;
                                hlist_add_head(&obj2->node,
                                               &percpu_pool->free_objs);
                                percpu_pool->obj_free++;
                                obj_pool_used++;
                                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                        }
                }

                if (obj_pool_used > obj_pool_max_used)
                        obj_pool_max_used = obj_pool_used;

                if (obj_pool_free < obj_pool_min_free)
                        obj_pool_min_free = obj_pool_free;
        }
        raw_spin_unlock(&pool_lock);

init_obj:
        if (obj) {
                obj->object = addr;
                obj->descr  = descr;
                obj->state  = ODEBUG_STATE_NONE;
                obj->astate = 0;
                hlist_add_head(&obj->node, &b->list);
        }
        return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
        struct hlist_node *tmp;
        struct debug_obj *obj;
        unsigned long flags;
        HLIST_HEAD(tofree);

        WRITE_ONCE(obj_freeing, false);
        if (!raw_spin_trylock_irqsave(&pool_lock, flags))
                return;

        if (obj_pool_free >= debug_objects_pool_size)
                goto free_objs;

        /*
         * The objs on the pool list might be allocated before the work is
         * run, so recheck whether the pool list is full. If not, refill
         * the pool list from the global free list. As it is likely that a
         * workload may be gearing up to use more and more objects, don't
         * free any of them until the next round.
         */
        while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
                obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                hlist_del(&obj->node);
                hlist_add_head(&obj->node, &obj_pool);
                WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
        return;

free_objs:
        /*
         * Pool list is already full and there are still objs on the free
         * list. Move remaining free objs to a temporary list to free the
         * memory outside the pool_lock held region.
         */
        if (obj_nr_tofree) {
                hlist_move_list(&obj_to_free, &tofree);
                debug_objects_freed += obj_nr_tofree;
                WRITE_ONCE(obj_nr_tofree, 0);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);

        hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
}

static void __free_object(struct debug_obj *obj)
{
        struct debug_obj *objs[ODEBUG_BATCH_SIZE];
        struct debug_percpu_free *percpu_pool;
        int lookahead_count = 0;
        unsigned long flags;
        bool work;

        local_irq_save(flags);
        if (!obj_cache)
                goto free_to_obj_pool;

        /*
         * Try to free it into the percpu pool first.
         */
        percpu_pool = this_cpu_ptr(&percpu_obj_pool);
        if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
                hlist_add_head(&obj->node, &percpu_pool->free_objs);
                percpu_pool->obj_free++;
                local_irq_restore(flags);
                return;
        }

        /*
         * As the percpu pool is full, look ahead and pull out a batch
         * of objects from the percpu pool and free them as well.
         */
        for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
                objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
                if (!objs[lookahead_count])
                        break;
                percpu_pool->obj_free--;
        }

free_to_obj_pool:
        raw_spin_lock(&pool_lock);
        work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
               (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
        obj_pool_used--;

        if (work) {
                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                hlist_add_head(&obj->node, &obj_to_free);
                if (lookahead_count) {
                        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
                                               &obj_to_free);
                        }
                }

                if ((obj_pool_free > debug_objects_pool_size) &&
                    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
                        int i;

                        /*
                         * Free one more batch of objects from obj_pool.
                         */
                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                                obj = __alloc_object(&obj_pool);
                                hlist_add_head(&obj->node, &obj_to_free);
                                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                        }
                }
        } else {
                WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                hlist_add_head(&obj->node, &obj_pool);
                if (lookahead_count) {
                        WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
                                               &obj_pool);
                        }
                }
        }
        raw_spin_unlock(&pool_lock);
        local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
        __free_object(obj);
        if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
        struct debug_percpu_free *percpu_pool;
        struct hlist_node *tmp;
        struct debug_obj *obj;

        /* Remote access is safe as the CPU is dead already */
        percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
        hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
        percpu_pool->obj_free = 0;

        return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        HLIST_HEAD(freelist);
        struct debug_obj *obj;
        unsigned long flags;
        int i;

        pr_warn("Out of memory. ODEBUG disabled\n");

        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_move_list(&db->list, &freelist);
                raw_spin_unlock_irqrestore(&db->lock, flags);

                /* Now free them */
                hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }
        }
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
        unsigned long hash;

        hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
        return &obj_hash[hash];
}
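
/*
 * Illustrative sketch, not part of the original file: because the hash key
 * is the chunk (page) number of the address, all addresses within one
 * ODEBUG_CHUNK_SIZE chunk map to the same bucket, which is what lets
 * debug_check_no_obj_freed() scan only the buckets overlapping a freed range.
 */
#if 0   /* example only, never compiled */
static void example_same_bucket(void)
{
        /* kmalloc(128) is 128-byte aligned, so both addresses share a page */
        char *p = kmalloc(128, GFP_KERNEL);

        if (!p)
                return;
        WARN_ON(get_bucket((unsigned long)p) !=
                get_bucket((unsigned long)(p + 64)));
        kfree(p);
}
#endif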

static void debug_print_object(struct debug_obj *obj, char *msg)
{
        const struct debug_obj_descr *descr = obj->descr;
        static int limit;

        if (limit < 5 && descr != descr_test) {
                void *hint = descr->debug_hint ?
                        descr->debug_hint(obj->object) : NULL;
                limit++;
                WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
                                 "object type: %s hint: %pS\n",
                        msg, obj_states[obj->state], obj->astate,
                        descr->name, hint);
        }
        debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
                   void * addr, enum debug_obj_state state)
{
        if (fixup && fixup(addr, state)) {
                debug_objects_fixups++;
                return true;
        }
        return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
        int is_on_stack;
        static int limit;

        if (limit > 4)
                return;

        is_on_stack = object_is_on_stack(addr);
        if (is_on_stack == onstack)
                return;

        limit++;
        if (is_on_stack)
                pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
                         task_stack_page(current));
        else
                pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
                         task_stack_page(current));

        WARN_ON(1);
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
        enum debug_obj_state state;
        bool check_stack = false;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj) {
                obj = alloc_object(addr, db, descr);
                if (!obj) {
                        debug_objects_enabled = 0;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        debug_objects_oom();
                        return;
                }
                check_stack = true;
        }

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_INIT;
                break;

        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "init");
                debug_object_fixup(descr->fixup_init, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "init");
                return;
        default:
                break;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (check_stack)
                debug_object_is_on_stack(addr, onstack);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
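
/*
 * Illustrative usage sketch, not part of the original file: how a subsystem
 * typically wires its objects into the tracker. The example_obj type, the
 * example_debug_descr descriptor and the helpers below are hypothetical.
 */
#if 0   /* example only, never compiled */
struct example_obj {
        int     static_init;    /* set only by a static initializer macro */
        int     busy;
};

static const struct debug_obj_descr example_debug_descr = {
        .name = "example_obj",
};

static void example_obj_setup(struct example_obj *eobj)
{
        /* Dynamically allocated object: NONE/INACTIVE -> INIT */
        debug_object_init(eobj, &example_debug_descr);
}

static void example_obj_start(struct example_obj *eobj)
{
        /* INIT/INACTIVE -> ACTIVE; complains about double activation */
        debug_object_activate(eobj, &example_debug_descr);
        eobj->busy = 1;
}

static void example_obj_stop(struct example_obj *eobj)
{
        eobj->busy = 0;
        /* ACTIVE -> INACTIVE */
        debug_object_deactivate(eobj, &example_debug_descr);
}

static void example_obj_teardown(struct example_obj *eobj)
{
        /* Drop the tracking entry before the memory is handed back */
        debug_object_free(eobj, &example_debug_descr);
}
#endif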

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *                              initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
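
/*
 * Illustrative sketch, not part of the original file: an object living on
 * the stack must use the _on_stack variant, otherwise the annotation check
 * in debug_object_is_on_stack() warns. It reuses the hypothetical
 * example_obj/example_debug_descr names from the sketch above.
 */
#if 0   /* example only, never compiled */
static void example_on_stack_usage(void)
{
        struct example_obj eobj = { .static_init = 0 };

        debug_object_init_on_stack(&eobj, &example_debug_descr);
        debug_object_activate(&eobj, &example_debug_descr);
        /* ... use the object ... */
        debug_object_deactivate(&eobj, &example_debug_descr);
        /* must be untracked before the stack frame disappears */
        debug_object_free(&eobj, &example_debug_descr);
}
#endif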

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int ret;
        struct debug_obj o = { .object = addr,
                               .state = ODEBUG_STATE_NOTAVAILABLE,
                               .descr = descr };

        if (!debug_objects_enabled)
                return 0;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                bool print_object = false;

                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                        obj->state = ODEBUG_STATE_ACTIVE;
                        ret = 0;
                        break;

                case ODEBUG_STATE_ACTIVE:
                        state = obj->state;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        debug_print_object(obj, "activate");
                        ret = debug_object_fixup(descr->fixup_activate, addr, state);
                        return ret ? 0 : -EINVAL;

                case ODEBUG_STATE_DESTROYED:
                        print_object = true;
                        ret = -EINVAL;
                        break;
                default:
                        ret = 0;
                        break;
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);
                if (print_object)
                        debug_print_object(obj, "activate");
                return ret;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);

        /*
         * We are here when a static object is activated. We
         * let the type specific code confirm whether this is
         * true or not. If true, we just make sure that the
         * static object is tracked in the object tracker. If
         * not, this must be a bug, so we try to fix it up.
         */
        if (descr->is_static_object && descr->is_static_object(addr)) {
                /* track this static object */
                debug_object_init(addr, descr);
                debug_object_activate(addr, descr);
        } else {
                debug_print_object(&o, "activate");
                ret = debug_object_fixup(descr->fixup_activate, addr,
                                        ODEBUG_STATE_NOTAVAILABLE);
                return ret ? 0 : -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
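
/*
 * Illustrative sketch, not part of the original file: the two descriptor
 * callbacks the activation path may invoke. is_static_object() lets a
 * subsystem recognize statically initialized objects so they are silently
 * tracked on first activation; fixup_activate() is the last resort for
 * everything else. The example_obj type and its static_init field are
 * hypothetical (see the sketch after debug_object_init() above).
 */
#if 0   /* example only, never compiled */
static bool example_is_static_object(void *addr)
{
        struct example_obj *eobj = addr;

        return eobj->static_init;
}

static bool example_fixup_activate(void *addr, enum debug_obj_state state)
{
        /*
         * Called with ODEBUG_STATE_NOTAVAILABLE for an unknown non-static
         * object and with ODEBUG_STATE_ACTIVE for a double activation.
         * Returning false means nothing could be repaired.
         */
        return false;
}
#endif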

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                case ODEBUG_STATE_ACTIVE:
                        if (!obj->astate)
                                obj->state = ODEBUG_STATE_INACTIVE;
                        else
                                print_object = true;
                        break;

                case ODEBUG_STATE_DESTROYED:
                        print_object = true;
                        break;
                default:
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "deactivate");
        } else if (print_object) {
                debug_print_object(obj, "deactivate");
        }
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_DESTROYED;
                break;
        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "destroy");
                debug_object_fixup(descr->fixup_destroy, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                print_object = true;
                break;
        default:
                break;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (print_object)
                debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "free");
                debug_object_fixup(descr->fixup_free, addr, state);
                return;
        default:
                hlist_del(&obj->node);
                raw_spin_unlock_irqrestore(&db->lock, flags);
                free_object(obj);
                return;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                raw_spin_unlock_irqrestore(&db->lock, flags);
                /*
                 * Maybe the object is static, and we let the type specific
                 * code confirm. If so, track this static object, otherwise
                 * invoke the fixup.
                 */
                if (descr->is_static_object && descr->is_static_object(addr)) {
                        /* Track this static object */
                        debug_object_init(addr, descr);
                } else {
                        debug_print_object(&o, "assert_init");
                        debug_object_fixup(descr->fixup_assert_init, addr,
                                           ODEBUG_STATE_NOTAVAILABLE);
                }
                return;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * @expect:     expected state
 * @next:       state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
                          unsigned int expect, unsigned int next)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_ACTIVE:
                        if (obj->astate == expect)
                                obj->astate = next;
                        else
                                print_object = true;
                        break;

                default:
                        print_object = true;
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "active_state");
        } else if (print_object) {
                debug_print_object(obj, "active_state");
        }
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
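
/*
 * Illustrative sketch, not part of the original file: astate lets a
 * subsystem layer its own sub-state machine on top of ODEBUG_STATE_ACTIVE,
 * for instance to catch an object being queued twice (RCU uses a similar
 * scheme for rcu_head debugging). The states and helpers below are
 * hypothetical and reuse the example_obj names from the earlier sketches.
 */
#if 0   /* example only, never compiled */
#define EXAMPLE_ASTATE_IDLE     0
#define EXAMPLE_ASTATE_QUEUED   1

static void example_obj_queue(struct example_obj *eobj)
{
        /* Complain unless the object is active and currently idle */
        debug_object_active_state(eobj, &example_debug_descr,
                                  EXAMPLE_ASTATE_IDLE, EXAMPLE_ASTATE_QUEUED);
}

static void example_obj_dequeue(struct example_obj *eobj)
{
        debug_object_active_state(eobj, &example_debug_descr,
                                  EXAMPLE_ASTATE_QUEUED, EXAMPLE_ASTATE_IDLE);
}
#endif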

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
        unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
        const struct debug_obj_descr *descr;
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct hlist_node *tmp;
        struct debug_obj *obj;
        int cnt, objs_checked = 0;

        saddr = (unsigned long) address;
        eaddr = saddr + size;
        paddr = saddr & ODEBUG_CHUNK_MASK;
        chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
        chunks >>= ODEBUG_CHUNK_SHIFT;

        for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
                db = get_bucket(paddr);

repeat:
                cnt = 0;
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
                        cnt++;
                        oaddr = (unsigned long) obj->object;
                        if (oaddr < saddr || oaddr >= eaddr)
                                continue;

                        switch (obj->state) {
                        case ODEBUG_STATE_ACTIVE:
                                descr = obj->descr;
                                state = obj->state;
                                raw_spin_unlock_irqrestore(&db->lock, flags);
                                debug_print_object(obj, "free");
                                debug_object_fixup(descr->fixup_free,
                                                   (void *) oaddr, state);
                                goto repeat;
                        default:
                                hlist_del(&obj->node);
                                __free_object(obj);
                                break;
                        }
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);

                if (cnt > debug_objects_maxchain)
                        debug_objects_maxchain = cnt;

                objs_checked += cnt;
        }

        if (objs_checked > debug_objects_maxchecked)
                debug_objects_maxchecked = objs_checked;

        /* Schedule work to actually kmem_cache_free() objects */
        if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
        if (debug_objects_enabled)
                __debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
        int cpu, obj_percpu_free = 0;

        for_each_possible_cpu(cpu)
                obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

        seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
        seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
        seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
        seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
        seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
        seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
        seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
        seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
        seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
        seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
        seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
        seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
        struct dentry *dbgdir;

        if (!debug_objects_enabled)
                return 0;

        dbgdir = debugfs_create_dir("debug_objects", NULL);

        debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

        return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
        unsigned long   dummy1[6];
        int             static_init;
        unsigned long   dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
        struct self_test *obj = addr;

        return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_init(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                return true;
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_activate(obj, &descr_type_test);
                return true;

        default:
                return false;
        }
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_destroy(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_free(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int res = -EINVAL;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj && state != ODEBUG_STATE_NONE) {
                WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
                goto out;
        }
        if (obj && obj->state != state) {
                WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
                       obj->state, state);
                goto out;
        }
        if (fixups != debug_objects_fixups) {
                WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
                       fixups, debug_objects_fixups);
                goto out;
        }
        if (warnings != debug_objects_warnings) {
                WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
                       warnings, debug_objects_warnings);
                goto out;
        }
        res = 0;
out:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (res)
                debug_objects_enabled = 0;
        return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
        .name                   = "selftest",
        .is_static_object       = is_static_object,
        .fixup_init             = fixup_init,
        .fixup_activate         = fixup_activate,
        .fixup_destroy          = fixup_destroy,
        .fixup_free             = fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
        int fixups, oldfixups, warnings, oldwarnings;
        unsigned long flags;

        local_irq_save(flags);

        fixups = oldfixups = debug_objects_fixups;
        warnings = oldwarnings = debug_objects_warnings;
        descr_test = &descr_type_test;

        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
                goto out;
        debug_object_destroy(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

        obj.static_init = 1;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        __debug_check_no_obj_freed(&obj, sizeof(obj));
        if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
                goto out;
#endif
        pr_info("selftest passed\n");

out:
        debug_objects_fixups = oldfixups;
        debug_objects_warnings = oldwarnings;
        descr_test = NULL;

        local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
        int i;

        for (i = 0; i < ODEBUG_HASH_SIZE; i++)
                raw_spin_lock_init(&obj_hash[i].lock);

        for (i = 0; i < ODEBUG_POOL_SIZE; i++)
                hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        struct debug_obj *obj, *new;
        HLIST_HEAD(objects);
        int i, cnt = 0;

        for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
                obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
                if (!obj)
                        goto free;
                hlist_add_head(&obj->node, &objects);
        }

        /*
         * debug_objects_mem_init() is now called early, when only one CPU is up
         * and interrupts have been disabled, so it is safe to replace the
         * active object references.
         */

        /* Remove the statically allocated objects from the pool */
        hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
                hlist_del(&obj->node);
        /* Move the allocated objects to the pool */
        hlist_move_list(&objects, &obj_pool);

        /* Replace the active object references */
        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                hlist_move_list(&db->list, &objects);

                hlist_for_each_entry(obj, &objects, node) {
                        new = hlist_entry(obj_pool.first, typeof(*obj), node);
                        hlist_del(&new->node);
                        /* copy object data */
                        *new = *obj;
                        hlist_add_head(&new->node, &db->list);
                        cnt++;
                }
        }

        pr_debug("%d of %d active objects replaced\n",
                 cnt, obj_pool_used);
        return 0;
free:
        hlist_for_each_entry_safe(obj, tmp, &objects, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
        return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for the
 * debug tracker objects, which would cause recursive calls.
 */
void __init debug_objects_mem_init(void)
{
        int cpu, extras;

        if (!debug_objects_enabled)
                return;

        /*
         * Initialize the percpu object pools
         *
         * Initialization is not strictly necessary, but was done for
         * completeness.
         */
        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

        obj_cache = kmem_cache_create("debug_objects_cache",
                                      sizeof (struct debug_obj), 0,
                                      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
                                      NULL);

        if (!obj_cache || debug_objects_replace_static_objects()) {
                debug_objects_enabled = 0;
                kmem_cache_destroy(obj_cache);
                pr_warn("out of memory.\n");
        } else
                debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
                                        object_cpu_offline);
#endif

        /*
         * Increase the thresholds for allocating and freeing objects
         * according to the number of possible CPUs available in the system.
         */
        extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
        debug_objects_pool_size += extras;
        debug_objects_pool_min_level += extras;
}
1403