/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block.  The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
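
/*
 * Illustrative sketch (not part of the original file): the typical lookup
 * pattern implied by the locking notes above, as implemented by
 * find_and_get_object() below:
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);	// under kmemleak_lock
 *	if (object && !get_object(object))	// may already be on its way out
 *		object = NULL;
 *	rcu_read_unlock();
 *	...
 *	if (object)
 *		put_object(object);	// schedules RCU freeing at use_count 0
 */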

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE               16      /* stack trace length */
#define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
#define SECS_FIRST_SCAN         60      /* delay before the first scan */
#define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */
#define MAX_SCAN_SIZE           4096    /* maximum size of a scanned block */

#define BYTES_PER_POINTER       sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)  (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
                                 __GFP_NOWARN | __GFP_NOFAIL)
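
/*
 * Illustrative example (not part of the original file): the mask above clamps
 * the caller's flags to the GFP_KERNEL/GFP_ATOMIC bits only, so e.g.
 * gfp_kmemleak_mask(GFP_KERNEL | __GFP_HIGHMEM) drops __GFP_HIGHMEM and adds
 * __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN | __GFP_NOFAIL for the
 * internal metadata allocation.
 */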

/* scanning area inside a memory block */
struct kmemleak_scan_area {
        struct hlist_node node;
        unsigned long start;
        size_t size;
};

#define KMEMLEAK_GREY   0
#define KMEMLEAK_BLACK  -1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
        spinlock_t lock;
        unsigned int flags;             /* object status flags */
        struct list_head object_list;
        struct list_head gray_list;
        struct rb_node rb_node;
        struct rcu_head rcu;            /* object_list lockless traversal */
        /* object usage count; object freed when use_count == 0 */
        atomic_t use_count;
        unsigned long pointer;
        size_t size;
        /* pass surplus references to this pointer */
        unsigned long excess_ref;
        /* minimum number of pointers found before the object is considered a leak */
        int min_count;
        /* the total number of pointers found pointing to this object */
        int count;
        /* checksum for detecting modified objects */
        u32 checksum;
        /* memory ranges to be scanned inside an object (empty for all) */
        struct hlist_head area_list;
        unsigned long trace[MAX_TRACE];
        unsigned int trace_len;
        unsigned long jiffies;          /* creation timestamp */
        pid_t pid;                      /* pid of the current task */
        char comm[TASK_COMM_LEN];       /* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED        (1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED         (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN          (1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE            16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE          1
/* include ASCII after the hex output */
#define HEX_ASCII               1
/* max number of lines to be printed */
#define HEX_MAX_LINES           2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */
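
/*
 * Illustrative flow (not part of the original file): a kmemleak_alloc()
 * call issued before kmemleak is fully up ends in log_early(); once
 * kmemleak_init() runs (outside this excerpt), each buffered entry is
 * replayed, e.g. a KMEMLEAK_ALLOC entry via early_alloc() below.
 */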

/* kmemleak operation type for early logging */
enum {
        KMEMLEAK_ALLOC,
        KMEMLEAK_ALLOC_PERCPU,
        KMEMLEAK_FREE,
        KMEMLEAK_FREE_PART,
        KMEMLEAK_FREE_PERCPU,
        KMEMLEAK_NOT_LEAK,
        KMEMLEAK_IGNORE,
        KMEMLEAK_SCAN_AREA,
        KMEMLEAK_NO_SCAN,
        KMEMLEAK_SET_EXCESS_REF
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
        int op_type;                    /* kmemleak operation type */
        int min_count;                  /* minimum reference count */
        const void *ptr;                /* allocated/freed memory block */
        union {
                size_t size;            /* memory block size */
                unsigned long excess_ref; /* surplus reference passing */
        };
        unsigned long trace[MAX_TRACE]; /* stack trace */
        unsigned int trace_len;         /* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
        early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)     do {            \
        pr_warn(x);                             \
        dump_stack();                           \
        kmemleak_warning = 1;                   \
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)     do {    \
        kmemleak_warn(x);               \
        kmemleak_disable();             \
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
                            struct kmemleak_object *object)
{
        const u8 *ptr = (const u8 *)object->pointer;
        size_t len;

        /* limit the number of lines to HEX_MAX_LINES */
        len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

        seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
        kasan_disable_current();
        seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
                     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
        kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
        return object->count != KMEMLEAK_BLACK &&
                object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
        return object->min_count != KMEMLEAK_BLACK &&
                object->count >= object->min_count;
}
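
/*
 * Illustrative example (not part of the original file): with min_count == 1
 * and count == 0 an object is white (a leak candidate); once the scan finds
 * one pointer to it, count becomes 1 and color_gray() holds. An object
 * painted with KMEMLEAK_BLACK (min_count == -1) is neither white nor gray
 * and is skipped by both predicates.
 */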

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
        return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
                time_before_eq(object->jiffies + jiffies_min_age,
                               jiffies_last_scan);
}

/*
 * Print the information of an unreferenced object to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
                               struct kmemleak_object *object)
{
        int i;
        unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

        seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
                   object->pointer, object->size);
        seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
                   object->comm, object->pid, object->jiffies,
                   msecs_age / 1000, msecs_age % 1000);
        hex_dump_object(seq, object);
        seq_printf(seq, "  backtrace:\n");

        for (i = 0; i < object->trace_len; i++) {
                void *ptr = (void *)object->trace[i];
                seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
        }
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
        struct stack_trace trace;

        trace.nr_entries = object->trace_len;
        trace.entries = object->trace;

        pr_notice("Object 0x%08lx (size %zu):\n",
                  object->pointer, object->size);
        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
                  object->comm, object->pid, object->jiffies);
        pr_notice("  min_count = %d\n", object->min_count);
        pr_notice("  count = %d\n", object->count);
        pr_notice("  flags = 0x%x\n", object->flags);
        pr_notice("  checksum = %u\n", object->checksum);
        pr_notice("  backtrace:\n");
        print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
        struct rb_node *rb = object_tree_root.rb_node;

        while (rb) {
                struct kmemleak_object *object =
                        rb_entry(rb, struct kmemleak_object, rb_node);
                if (ptr < object->pointer)
                        rb = object->rb_node.rb_left;
                else if (object->pointer + object->size <= ptr)
                        rb = object->rb_node.rb_right;
                else if (object->pointer == ptr || alias)
                        return object;
                else {
                        kmemleak_warn("Found object by alias at 0x%08lx\n",
                                      ptr);
                        dump_object_info(object);
                        break;
                }
        }
        return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing is already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
        return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
        struct hlist_node *tmp;
        struct kmemleak_scan_area *area;
        struct kmemleak_object *object =
                container_of(rcu, struct kmemleak_object, rcu);

        /*
         * Once use_count is 0 (guaranteed by put_object), there is no other
         * code accessing this object, hence no need for locking.
         */
        hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
                hlist_del(&area->node);
                kmem_cache_free(scan_area_cache, area);
        }
        kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
        if (!atomic_dec_and_test(&object->use_count))
                return;

        /* should only get here after delete_object was called */
        WARN_ON(object->flags & OBJECT_ALLOCATED);

        call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
        unsigned long flags;
        struct kmemleak_object *object;

        rcu_read_lock();
        read_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
        read_unlock_irqrestore(&kmemleak_lock, flags);

        /* check whether the object is still available */
        if (object && !get_object(object))
                object = NULL;
        rcu_read_unlock();

        return object;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
        unsigned long flags;
        struct kmemleak_object *object;

        write_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
        if (object) {
                rb_erase(&object->rb_node, &object_tree_root);
                list_del_rcu(&object->object_list);
        }
        write_unlock_irqrestore(&kmemleak_lock, flags);

        return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
        struct stack_trace stack_trace;

        stack_trace.max_entries = MAX_TRACE;
        stack_trace.nr_entries = 0;
        stack_trace.entries = trace;
        stack_trace.skip = 2;
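        /* skip the first two entries, i.e. the trace-saving helpers themselves */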
        save_stack_trace(&stack_trace);

        return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
                                             int min_count, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object, *parent;
        struct rb_node **link, *rb_parent;

        object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
        if (!object) {
                pr_warn("Cannot allocate a kmemleak_object structure\n");
                kmemleak_disable();
                return NULL;
        }

        INIT_LIST_HEAD(&object->object_list);
        INIT_LIST_HEAD(&object->gray_list);
        INIT_HLIST_HEAD(&object->area_list);
        spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
        object->flags = OBJECT_ALLOCATED;
        object->pointer = ptr;
        object->size = size;
        object->excess_ref = 0;
        object->min_count = min_count;
        object->count = 0;                      /* white color initially */
        object->jiffies = jiffies;
        object->checksum = 0;

        /* task information */
        if (in_irq()) {
                object->pid = 0;
                strncpy(object->comm, "hardirq", sizeof(object->comm));
        } else if (in_softirq()) {
                object->pid = 0;
                strncpy(object->comm, "softirq", sizeof(object->comm));
        } else {
                object->pid = current->pid;
                /*
                 * There is a small chance of a race with set_task_comm(),
                 * however using get_task_comm() here may cause locking
                 * dependency issues with current->alloc_lock. In the worst
                 * case, the recorded task name is not correct.
                 */
                strncpy(object->comm, current->comm, sizeof(object->comm));
        }

        /* kernel backtrace */
        object->trace_len = __save_stack_trace(object->trace);

        write_lock_irqsave(&kmemleak_lock, flags);

        min_addr = min(min_addr, ptr);
        max_addr = max(max_addr, ptr + size);
        link = &object_tree_root.rb_node;
        rb_parent = NULL;
        while (*link) {
                rb_parent = *link;
                parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
                if (ptr + size <= parent->pointer)
                        link = &parent->rb_node.rb_left;
                else if (parent->pointer + parent->size <= ptr)
                        link = &parent->rb_node.rb_right;
                else {
                        kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
                                      ptr);
                        /*
                         * No need for parent->lock here since "parent" cannot
                         * be freed while the kmemleak_lock is held.
                         */
                        dump_object_info(parent);
                        kmem_cache_free(object_cache, object);
                        object = NULL;
                        goto out;
                }
        }
        rb_link_node(&object->rb_node, rb_parent, link);
        rb_insert_color(&object->rb_node, &object_tree_root);

        list_add_tail_rcu(&object->object_list, &object_list);
out:
        write_unlock_irqrestore(&kmemleak_lock, flags);
        return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
        unsigned long flags;

        WARN_ON(!(object->flags & OBJECT_ALLOCATED));
        WARN_ON(atomic_read(&object->use_count) < 1);

        /*
         * Locking here also ensures that the corresponding memory block
         * cannot be freed when it is being scanned.
         */
        spin_lock_irqsave(&object->lock, flags);
        object->flags &= ~OBJECT_ALLOCATED;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
        struct kmemleak_object *object;

        object = find_and_remove_object(ptr, 0);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
                              ptr);
#endif
                return;
        }
        __delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
        struct kmemleak_object *object;
        unsigned long start, end;

        object = find_and_remove_object(ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
                              ptr, size);
#endif
                return;
        }

        /*
         * Create one or two objects that may result from the memory block
         * split. Note that partial freeing is only done by free_bootmem() and
         * this happens before kmemleak_init() is called. The path below is
         * only executed during early log recording in kmemleak_init(), so
         * GFP_KERNEL is enough.
         */
        start = object->pointer;
        end = object->pointer + object->size;
        if (ptr > start)
                create_object(start, ptr - start, object->min_count,
                              GFP_KERNEL);
        if (ptr + size < end)
                create_object(ptr + size, end - ptr - size, object->min_count,
                              GFP_KERNEL);

        __delete_object(object);
}

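/*
 * Paint an object's color by storing it in object->min_count. Painting with
 * KMEMLEAK_GREY (0) makes color_gray() always true, so the object is never
 * reported as a leak; KMEMLEAK_BLACK (-1) additionally sets OBJECT_NO_SCAN
 * so the block is neither reported nor scanned.
 */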
static void __paint_it(struct kmemleak_object *object, int color)
{
        object->min_count = color;
        if (color == KMEMLEAK_BLACK)
                object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
        unsigned long flags;

        spin_lock_irqsave(&object->lock, flags);
        __paint_it(object, color);
        spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
                              ptr,
                              (color == KMEMLEAK_GREY) ? "Grey" :
                              (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
                return;
        }
        paint_it(object, color);
        put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area;

        object = find_and_get_object(ptr, 1);
        if (!object) {
                kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
        if (!area) {
                pr_warn("Cannot allocate a scan area\n");
                goto out;
        }

        spin_lock_irqsave(&object->lock, flags);
        if (size == SIZE_MAX) {
                size = object->pointer + object->size - ptr;
        } else if (ptr + size > object->pointer + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
                goto out_unlock;
        }

        INIT_HLIST_NODE(&area->node);
        area->start = ptr;
        area->size = size;

        hlist_add_head(&area->node, &object->area_list);
out_unlock:
        spin_unlock_irqrestore(&object->lock, flags);
out:
        put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->excess_ref = excess_ref;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->flags |= OBJECT_NO_SCAN;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
                             int min_count)
{
        unsigned long flags;
        struct early_log *log;

        if (kmemleak_error) {
                /* kmemleak stopped recording, just count the requests */
                crt_early_log++;
                return;
        }

        if (crt_early_log >= ARRAY_SIZE(early_log)) {
                crt_early_log++;
                kmemleak_disable();
                return;
        }

        /*
         * There is no need for locking since the kernel is still in UP mode
         * at this stage. Disabling the IRQs is enough.
         */
        local_irq_save(flags);
        log = &early_log[crt_early_log];
        log->op_type = op_type;
        log->ptr = ptr;
        log->size = size;
        log->min_count = min_count;
        log->trace_len = __save_stack_trace(log->trace);
        crt_early_log++;
        local_irq_restore(flags);
}

/*
 * Register an early allocated block logged in the early_log buffer and
 * populate its stack trace from the log entry.
 */
static void early_alloc(struct early_log *log)
{
        struct kmemleak_object *object;
        unsigned long flags;
        int i;

        if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
                return;

        /*
         * RCU locking needed to ensure object is not freed via put_object().
         */
        rcu_read_lock();
        object = create_object((unsigned long)log->ptr, log->size,
                               log->min_count, GFP_ATOMIC);
        if (!object)
                goto out;
        spin_lock_irqsave(&object->lock, flags);
        for (i = 0; i < log->trace_len; i++)
                object->trace[i] = log->trace[i];
        object->trace_len = log->trace_len;
        spin_unlock_irqrestore(&object->lock, flags);
out:
        rcu_read_unlock();
}

/*
 * Register an early allocated __percpu block, creating the metadata for each
 * possible CPU.
 */
static void early_alloc_percpu(struct early_log *log)
{
        unsigned int cpu;
        const void __percpu *ptr = log->ptr;

        for_each_possible_cpu(cpu) {
                log->ptr = per_cpu_ptr(ptr, cpu);
                early_alloc(log);
        }
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:        pointer to beginning of the object
 * @size:       size of the object
 * @min_count:  minimum number of references to this object. If during memory
 *              scanning a number of references less than @min_count is found,
 *              the object is reported as a memory leak. If @min_count is 0,
 *              the object is never reported as a leak. If @min_count is -1,
 *              the object is ignored (not scanned and not reported as a leak)
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
                          gfp_t gfp)
{
        pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                create_object((unsigned long)ptr, size, min_count, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
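
/*
 * Example (not part of the original file): the slab hooks are assumed to
 * call this as kmemleak_alloc(ptr, size, 1, flags), i.e. at least one
 * pointer to the block must be found during scanning or it is reported as
 * a leak.
 */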

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 * @size:       size of the object
 * @gfp:        flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
                                 gfp_t gfp)
{
        unsigned int cpu;

        pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

        /*
         * Percpu allocations are only scanned and not reported as leaks
         * (min_count is set to 0).
         */
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        create_object((unsigned long)per_cpu_ptr(ptr, cpu),
                                      size, 0, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:       pointer to vm_struct
 * @size:       size of the object
 * @gfp:        __vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

        /*
         * A min_count = 2 is needed because vm_struct contains a reference to
         * the virtual address of the vmalloc'ed block.
         */
        if (kmemleak_enabled) {
                create_object((unsigned long)area->addr, size, 2, gfp);
                object_set_excess_ref((unsigned long)area,
                                      (unsigned long)area->addr);
        } else if (kmemleak_early_log) {
                log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
                /* reusing early_log.size for storing area->addr */
                log_early(KMEMLEAK_SET_EXCESS_REF,
                          area, (unsigned long)area->addr, 0);
        }
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:        pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:        pointer to the beginning or inside the object. This also
 *              represents the start of the range to be freed
 * @size:       size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                delete_object_part((unsigned long)ptr, size);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
        unsigned int cpu;

        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        delete_object_full((unsigned long)per_cpu_ptr(ptr,
                                                                      cpu));
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:        pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
        struct kmemleak_object *object;
        unsigned long flags;

        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
                return;

        object = find_and_get_object((unsigned long)ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Updating stack trace for unknown object at %p\n",
                              ptr);
#endif
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->trace_len = __save_stack_trace(object->trace);
        spin_unlock_irqrestore(&object->lock, flags);

        put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_gray_object((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_black_object((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:        pointer to beginning or inside the object. This also
 *              represents the start of the scan area
 * @size:       size of the scan area
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
                add_scan_area((unsigned long)ptr, size, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                object_no_scan((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *                       address argument
 * @phys:       physical address of the object
 * @size:       size of the object
 * @min_count:  minimum number of references to this object.
 *              See kmemleak_alloc()
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
                               gfp_t gfp)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
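
/*
 * Note (not part of the original file): the *_phys wrappers only act when
 * the address falls in lowmem (PHYS_PFN(phys) < max_low_pfn); highmem pages
 * have no permanent __va() mapping and are silently ignored.
 */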

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *                           physical address argument
 * @phys:       physical address of the beginning or inside an object. This
 *              also represents the start of the range to be freed
 * @size:       size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *                          address argument
 * @phys:       physical address of the object
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *                        address argument
 * @phys:       physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
        u32 old_csum = object->checksum;

        kasan_disable_current();
        object->checksum = crc32(0, (void *)object->pointer, object->size);
        kasan_enable_current();

        return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
        if (!color_white(object)) {
                /* non-orphan, ignored or new */
                return;
        }

        /*
         * Increase the object's reference count (number of pointers to the
         * memory block). If this count reaches the required minimum, the
         * object's color will become gray and it will be added to the
         * gray_list.
         */
        object->count++;
        if (color_gray(object)) {
                /* put_object() called when removing from gray_list */
                WARN_ON(!get_object(object));
                list_add_tail(&object->gray_list, &gray_list);
        }
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
        if (!kmemleak_enabled)
                return 1;

        /*
         * This function may be called from either process or kthread context,
         * hence the need to check for both stop conditions.
         */
        if (current->mm)
                return signal_pending(current);
        else
                return kthread_should_stop();

        return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
                       struct kmemleak_object *scanned)
{
        unsigned long *ptr;
        unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
        unsigned long *end = _end - (BYTES_PER_POINTER - 1);
        unsigned long flags;

        read_lock_irqsave(&kmemleak_lock, flags);
        for (ptr = start; ptr < end; ptr++) {
                struct kmemleak_object *object;
                unsigned long pointer;
                unsigned long excess_ref;

                if (scan_should_stop())
                        break;

                kasan_disable_current();
                pointer = *ptr;
                kasan_enable_current();

                if (pointer < min_addr || pointer >= max_addr)
                        continue;

                /*
                 * No need for get_object() here since we hold kmemleak_lock.
                 * object->use_count cannot be dropped to 0 while the object
                 * is still present in object_tree_root and object_list
                 * (with updates protected by kmemleak_lock).
                 */
                object = lookup_object(pointer, 1);
                if (!object)
                        continue;
                if (object == scanned)
                        /* self referenced, ignore */
                        continue;

                /*
                 * Avoid the lockdep recursive warning on object->lock being
                 * previously acquired in scan_object(). These locks are
                 * enclosed by scan_mutex.
                 */
                spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                /* only pass surplus references (object already gray) */
                if (color_gray(object)) {
                        excess_ref = object->excess_ref;
                        /* no need for update_refs() if object already gray */
                } else {
                        excess_ref = 0;
                        update_refs(object);
                }
                spin_unlock(&object->lock);

                if (excess_ref) {
                        object = lookup_object(excess_ref, 0);
                        if (!object)
                                continue;
                        if (object == scanned)
                                /* circular reference, ignore */
                                continue;
                        spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                        update_refs(object);
                        spin_unlock(&object->lock);
                }
        }
        read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
static void scan_large_block(void *start, void *end)
{
        void *next;

        while (start < end) {
                next = min(start + MAX_SCAN_SIZE, end);
                scan_block(start, next, NULL);
                start = next;
                cond_resched();
        }
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
        struct kmemleak_scan_area *area;
        unsigned long flags;

        /*
         * Once the object->lock is acquired, the corresponding memory block
         * cannot be freed (the same lock is acquired in delete_object).
         */
        spin_lock_irqsave(&object->lock, flags);
        if (object->flags & OBJECT_NO_SCAN)
                goto out;
        if (!(object->flags & OBJECT_ALLOCATED))
                /* already freed object */
                goto out;
        if (hlist_empty(&object->area_list)) {
                void *start = (void *)object->pointer;
                void *end = (void *)(object->pointer + object->size);
                void *next;

                do {
                        next = min(start + MAX_SCAN_SIZE, end);
                        scan_block(start, next, object);

                        start = next;
                        if (start >= end)
                                break;

                        spin_unlock_irqrestore(&object->lock, flags);
                        cond_resched();
                        spin_lock_irqsave(&object->lock, flags);
                } while (object->flags & OBJECT_ALLOCATED);
        } else
                hlist_for_each_entry(area, &object->area_list, node)
                        scan_block((void *)area->start,
                                   (void *)(area->start + area->size),
                                   object);
out:
        spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
        struct kmemleak_object *object, *tmp;

        /*
         * The list traversal is safe for both tail additions and removals
         * from inside the loop. The kmemleak objects cannot be freed from
         * outside the loop because their use_count was incremented.
         */
        object = list_entry(gray_list.next, typeof(*object), gray_list);
        while (&object->gray_list != &gray_list) {
                cond_resched();

                /* may add new objects to the list */
                if (!scan_should_stop())
                        scan_object(object);

                tmp = list_entry(object->gray_list.next, typeof(*object),
                                 gray_list);

                /* remove the object from the list and release it */
                list_del(&object->gray_list);
                put_object(object);

                object = tmp;
        }
        WARN_ON(!list_empty(&gray_list));
}
1465
1466/*
1467 * Scan data sections and all the referenced memory blocks allocated via the
1468 * kernel's standard allocators. This function must be called with the
1469 * scan_mutex held.
1470 */
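/*
 * In outline, a full scan proceeds as follows:
 *
 *  1. Whiten every object (count = 0) and queue the objects that are gray
 *     by definition (min_count == 0, e.g. marked via kmemleak_not_leak())
 *     on the gray_list.
 *  2. Scan the data/bss sections, the per-CPU areas, the struct page
 *     structures of in-use pages and, optionally, the task stacks for
 *     values that look like pointers to tracked objects.
 *  3. Scan the gray_list; this may turn further white objects gray and
 *     append them to the list.
 *  4. Temporarily re-gray the white objects whose checksum changed since
 *     the previous scan and scan them once more, to avoid reporting
 *     objects that were still being modified.
 *  5. Report any remaining white (unreferenced) objects as new leaks.
 */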
1471static void kmemleak_scan(void)
1472{
1473        unsigned long flags;
1474        struct kmemleak_object *object;
1475        int i;
1476        int new_leaks = 0;
1477
1478        jiffies_last_scan = jiffies;
1479
1480        /* prepare the kmemleak_object's */
1481        rcu_read_lock();
1482        list_for_each_entry_rcu(object, &object_list, object_list) {
1483                spin_lock_irqsave(&object->lock, flags);
1484#ifdef DEBUG
1485                /*
1486                 * With a few exceptions there should be a maximum of
1487                 * 1 reference to any object at this point.
1488                 */
1489                if (atomic_read(&object->use_count) > 1) {
1490                        pr_debug("object->use_count = %d\n",
1491                                 atomic_read(&object->use_count));
1492                        dump_object_info(object);
1493                }
1494#endif
1495                /* reset the reference count (whiten the object) */
1496                object->count = 0;
1497                if (color_gray(object) && get_object(object))
1498                        list_add_tail(&object->gray_list, &gray_list);
1499
1500                spin_unlock_irqrestore(&object->lock, flags);
1501        }
1502        rcu_read_unlock();
1503
1504        /* data/bss scanning */
1505        scan_large_block(_sdata, _edata);
1506        scan_large_block(__bss_start, __bss_stop);
1507        scan_large_block(__start_ro_after_init, __end_ro_after_init);
1508
1509#ifdef CONFIG_SMP
1510        /* per-cpu sections scanning */
1511        for_each_possible_cpu(i)
1512                scan_large_block(__per_cpu_start + per_cpu_offset(i),
1513                                 __per_cpu_end + per_cpu_offset(i));
1514#endif
1515
1516        /*
1517         * Struct page scanning for each node.
1518         */
1519        get_online_mems();
1520        for_each_online_node(i) {
1521                unsigned long start_pfn = node_start_pfn(i);
1522                unsigned long end_pfn = node_end_pfn(i);
1523                unsigned long pfn;
1524
1525                for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1526                        struct page *page;
1527
1528                        if (!pfn_valid(pfn))
1529                                continue;
1530                        page = pfn_to_page(pfn);
1531                        /* only scan if page is in use */
1532                        if (page_count(page) == 0)
1533                                continue;
1534                        scan_block(page, page + 1, NULL);
1535                        if (!(pfn & 63))
1536                                cond_resched();
1537                }
1538        }
1539        put_online_mems();
1540
1541        /*
1542         * Scanning the task stacks (may introduce false negatives).
1543         */
1544        if (kmemleak_stack_scan) {
1545                struct task_struct *p, *g;
1546
1547                read_lock(&tasklist_lock);
1548                do_each_thread(g, p) {
1549                        void *stack = try_get_task_stack(p);
1550                        if (stack) {
1551                                scan_block(stack, stack + THREAD_SIZE, NULL);
1552                                put_task_stack(p);
1553                        }
1554                } while_each_thread(g, p);
1555                read_unlock(&tasklist_lock);
1556        }
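
        /*
         * Stack scanning may introduce false negatives: stale word-sized
         * values left on a task stack can still look like valid pointers
         * to a tracked object, making a genuinely leaked object appear
         * referenced.
         */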
1557
1558        /*
1559         * Scan the objects already referenced from the sections scanned
1560         * above.
1561         */
1562        scan_gray_list();
1563
1564        /*
1565         * Check for new or unreferenced objects modified since the previous
1566         * scan and color them gray until the next scan.
1567         */
1568        rcu_read_lock();
1569        list_for_each_entry_rcu(object, &object_list, object_list) {
1570                spin_lock_irqsave(&object->lock, flags);
1571                if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1572                    && update_checksum(object) && get_object(object)) {
1573                        /* color it gray temporarily */
1574                        object->count = object->min_count;
1575                        list_add_tail(&object->gray_list, &gray_list);
1576                }
1577                spin_unlock_irqrestore(&object->lock, flags);
1578        }
1579        rcu_read_unlock();
1580
1581        /*
1582         * Re-scan the gray list for modified unreferenced objects.
1583         */
1584        scan_gray_list();
1585
1586        /*
1587         * If scanning was stopped do not report any new unreferenced objects.
1588         */
1589        if (scan_should_stop())
1590                return;
1591
1592        /*
1593         * Scanning result reporting.
1594         */
1595        rcu_read_lock();
1596        list_for_each_entry_rcu(object, &object_list, object_list) {
1597                spin_lock_irqsave(&object->lock, flags);
1598                if (unreferenced_object(object) &&
1599                    !(object->flags & OBJECT_REPORTED)) {
1600                        object->flags |= OBJECT_REPORTED;
1601                        new_leaks++;
1602                }
1603                spin_unlock_irqrestore(&object->lock, flags);
1604        }
1605        rcu_read_unlock();
1606
1607        if (new_leaks) {
1608                kmemleak_found_leaks = true;
1609
1610                pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1611                        new_leaks);
1612        }
1613
1614}
1615
1616/*
1617 * Thread function performing automatic memory scanning. Unreferenced objects
1618 * at the end of a memory scan are reported but only the first time.
1619 */
1620static int kmemleak_scan_thread(void *arg)
1621{
1622        static int first_run = 1;
1623
1624        pr_info("Automatic memory scanning thread started\n");
1625        set_user_nice(current, 10);
1626
1627        /*
1628         * Wait before the first scan to allow the system to fully initialize.
1629         */
1630        if (first_run) {
1631                signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1632                first_run = 0;
1633                while (timeout && !kthread_should_stop())
1634                        timeout = schedule_timeout_interruptible(timeout);
1635        }
1636
1637        while (!kthread_should_stop()) {
1638                signed long timeout = jiffies_scan_wait;
1639
1640                mutex_lock(&scan_mutex);
1641                kmemleak_scan();
1642                mutex_unlock(&scan_mutex);
1643
1644                /* wait before the next scan */
1645                while (timeout && !kthread_should_stop())
1646                        timeout = schedule_timeout_interruptible(timeout);
1647        }
1648
1649        pr_info("Automatic memory scanning thread ended\n");
1650
1651        return 0;
1652}
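
/*
 * The delay before the first scan is SECS_FIRST_SCAN seconds; the interval
 * between subsequent scans is jiffies_scan_wait, which defaults to
 * SECS_SCAN_WAIT seconds and can be changed at run time by writing
 * "scan=<secs>" to the debugfs file (see kmemleak_write() below).
 */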
1653
1654/*
1655 * Start the automatic memory scanning thread. This function must be called
1656 * with the scan_mutex held.
1657 */
1658static void start_scan_thread(void)
1659{
1660        if (scan_thread)
1661                return;
1662        scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1663        if (IS_ERR(scan_thread)) {
1664                pr_warn("Failed to create the scan thread\n");
1665                scan_thread = NULL;
1666        }
1667}
1668
1669/*
1670 * Stop the automatic memory scanning thread.
1671 */
1672static void stop_scan_thread(void)
1673{
1674        if (scan_thread) {
1675                kthread_stop(scan_thread);
1676                scan_thread = NULL;
1677        }
1678}
1679
1680/*
1681 * Iterate over the object_list and return the first valid object at or after
1682 * the required position with its use_count incremented. The function acquires
1683 * the scan_mutex, which is released in kmemleak_seq_stop().
1684 */
1685static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1686{
1687        struct kmemleak_object *object;
1688        loff_t n = *pos;
1689        int err;
1690
1691        err = mutex_lock_interruptible(&scan_mutex);
1692        if (err < 0)
1693                return ERR_PTR(err);
1694
1695        rcu_read_lock();
1696        list_for_each_entry_rcu(object, &object_list, object_list) {
1697                if (n-- > 0)
1698                        continue;
1699                if (get_object(object))
1700                        goto out;
1701        }
1702        object = NULL;
1703out:
1704        return object;
1705}
1706
1707/*
1708 * Return the next object in the object_list. The function decrements the
1709 * use_count of the previous object and increases that of the next one.
1710 */
1711static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1712{
1713        struct kmemleak_object *prev_obj = v;
1714        struct kmemleak_object *next_obj = NULL;
1715        struct kmemleak_object *obj = prev_obj;
1716
1717        ++(*pos);
1718
1719        list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1720                if (get_object(obj)) {
1721                        next_obj = obj;
1722                        break;
1723                }
1724        }
1725
1726        put_object(prev_obj);
1727        return next_obj;
1728}
1729
1730/*
1731 * Decrement the use_count of the last object returned, if any.
1732 */
1733static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1734{
1735        if (!IS_ERR(v)) {
1736                /*
1737                 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1738                 * waiting was interrupted, so only release it if !IS_ERR.
1739                 */
1740                rcu_read_unlock();
1741                mutex_unlock(&scan_mutex);
1742                if (v)
1743                        put_object(v);
1744        }
1745}
1746
1747/*
1748 * Print the information for an unreferenced object to the seq file.
1749 */
1750static int kmemleak_seq_show(struct seq_file *seq, void *v)
1751{
1752        struct kmemleak_object *object = v;
1753        unsigned long flags;
1754
1755        spin_lock_irqsave(&object->lock, flags);
1756        if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1757                print_unreferenced(seq, object);
1758        spin_unlock_irqrestore(&object->lock, flags);
1759        return 0;
1760}
1761
1762static const struct seq_operations kmemleak_seq_ops = {
1763        .start = kmemleak_seq_start,
1764        .next  = kmemleak_seq_next,
1765        .stop  = kmemleak_seq_stop,
1766        .show  = kmemleak_seq_show,
1767};
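
/*
 * Reading the /sys/kernel/debug/kmemleak file (e.g. with cat) walks the
 * object_list via the seq operations above and prints every object that
 * the last scan reported as unreferenced.
 */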
1768
1769static int kmemleak_open(struct inode *inode, struct file *file)
1770{
1771        return seq_open(file, &kmemleak_seq_ops);
1772}
1773
1774static int dump_str_object_info(const char *str)
1775{
1776        unsigned long flags;
1777        struct kmemleak_object *object;
1778        unsigned long addr;
1779
1780        if (kstrtoul(str, 0, &addr))
1781                return -EINVAL;
1782        object = find_and_get_object(addr, 0);
1783        if (!object) {
1784                pr_info("Unknown object at 0x%08lx\n", addr);
1785                return -EINVAL;
1786        }
1787
1788        spin_lock_irqsave(&object->lock, flags);
1789        dump_object_info(object);
1790        spin_unlock_irqrestore(&object->lock, flags);
1791
1792        put_object(object);
1793        return 0;
1794}
1795
1796/*
1797 * We use grey instead of black to ensure we can do future scans on the same
1798 * objects. If we did not scan these objects again, they could come to hold
1799 * references to newly allocated objects, and those allocations would then be
1800 * wrongly reported as leaks (false positives).
1801 */
1802static void kmemleak_clear(void)
1803{
1804        struct kmemleak_object *object;
1805        unsigned long flags;
1806
1807        rcu_read_lock();
1808        list_for_each_entry_rcu(object, &object_list, object_list) {
1809                spin_lock_irqsave(&object->lock, flags);
1810                if ((object->flags & OBJECT_REPORTED) &&
1811                    unreferenced_object(object))
1812                        __paint_it(object, KMEMLEAK_GREY);
1813                spin_unlock_irqrestore(&object->lock, flags);
1814        }
1815        rcu_read_unlock();
1816
1817        kmemleak_found_leaks = false;
1818}
1819
1820static void __kmemleak_do_cleanup(void);
1821
1822/*
1823 * File write operation to configure kmemleak at run-time. The following
1824 * commands can be written to the /sys/kernel/debug/kmemleak file:
1825 *   off        - disable kmemleak (irreversible)
1826 *   stack=on   - enable the task stacks scanning
1827 *   stack=off  - disable the task stacks scanning
1828 *   scan=on    - start the automatic memory scanning thread
1829 *   scan=off   - stop the automatic memory scanning thread
1830 *   scan=...   - set the automatic memory scanning period in seconds (0 to
1831 *                disable it)
1832 *   scan       - trigger a memory scan
1833 *   clear      - mark all currently reported unreferenced kmemleak objects as
1834 *                grey to ignore printing them, or free all kmemleak objects
1835 *                if kmemleak has been disabled.
1836 *   dump=...   - dump information about the object found at the given address
1837 */
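/*
 * For example, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   echo scan > /sys/kernel/debug/kmemleak        # trigger a scan now
 *   echo scan=600 > /sys/kernel/debug/kmemleak    # scan every 600 seconds
 *   echo clear > /sys/kernel/debug/kmemleak       # mute current reports
 *   echo dump=0xffff8800b2d5a000 > /sys/kernel/debug/kmemleak
 *
 * The "dump=" address above is only illustrative; use an address printed
 * in an actual leak report.
 */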
1838static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1839                              size_t size, loff_t *ppos)
1840{
1841        char buf[64];
1842        int buf_size;
1843        int ret;
1844
1845        buf_size = min(size, (sizeof(buf) - 1));
1846        if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1847                return -EFAULT;
1848        buf[buf_size] = 0;
1849
1850        ret = mutex_lock_interruptible(&scan_mutex);
1851        if (ret < 0)
1852                return ret;
1853
1854        if (strncmp(buf, "clear", 5) == 0) {
1855                if (kmemleak_enabled)
1856                        kmemleak_clear();
1857                else
1858                        __kmemleak_do_cleanup();
1859                goto out;
1860        }
1861
1862        if (!kmemleak_enabled) {
1863                ret = -EBUSY;
1864                goto out;
1865        }
1866
1867        if (strncmp(buf, "off", 3) == 0)
1868                kmemleak_disable();
1869        else if (strncmp(buf, "stack=on", 8) == 0)
1870                kmemleak_stack_scan = 1;
1871        else if (strncmp(buf, "stack=off", 9) == 0)
1872                kmemleak_stack_scan = 0;
1873        else if (strncmp(buf, "scan=on", 7) == 0)
1874                start_scan_thread();
1875        else if (strncmp(buf, "scan=off", 8) == 0)
1876                stop_scan_thread();
1877        else if (strncmp(buf, "scan=", 5) == 0) {
1878                unsigned long secs;
1879
1880                ret = kstrtoul(buf + 5, 0, &secs);
1881                if (ret < 0)
1882                        goto out;
1883                stop_scan_thread();
1884                if (secs) {
1885                        jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1886                        start_scan_thread();
1887                }
1888        } else if (strncmp(buf, "scan", 4) == 0)
1889                kmemleak_scan();
1890        else if (strncmp(buf, "dump=", 5) == 0)
1891                ret = dump_str_object_info(buf + 5);
1892        else
1893                ret = -EINVAL;
1894
1895out:
1896        mutex_unlock(&scan_mutex);
1897        if (ret < 0)
1898                return ret;
1899
1900        /* ignore the rest of the buffer, only one command at a time */
1901        *ppos += size;
1902        return size;
1903}
1904
1905static const struct file_operations kmemleak_fops = {
1906        .owner          = THIS_MODULE,
1907        .open           = kmemleak_open,
1908        .read           = seq_read,
1909        .write          = kmemleak_write,
1910        .llseek         = seq_lseek,
1911        .release        = seq_release,
1912};
1913
1914static void __kmemleak_do_cleanup(void)
1915{
1916        struct kmemleak_object *object;
1917
1918        rcu_read_lock();
1919        list_for_each_entry_rcu(object, &object_list, object_list)
1920                delete_object_full(object->pointer);
1921        rcu_read_unlock();
1922}
1923
1924/*
1925 * Stop the memory scanning thread and free the kmemleak internal objects,
1926 * but only if no memory leaks were found (otherwise, the metadata may still
1927 * provide useful information on the reported leaks).
1928 */
1929static void kmemleak_do_cleanup(struct work_struct *work)
1930{
1931        stop_scan_thread();
1932
1933        mutex_lock(&scan_mutex);
1934        /*
1935         * Once the scan thread is guaranteed to have stopped, it is safe to
1936         * no longer track object freeing. The ordering between stopping the
1937         * scan thread and the memory accesses below is guaranteed by the
1938         * kthread_stop() function.
1939         */
1940        kmemleak_free_enabled = 0;
1941        mutex_unlock(&scan_mutex);
1942
1943        if (!kmemleak_found_leaks)
1944                __kmemleak_do_cleanup();
1945        else
1946                pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
1947}
1948
1949static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1950
1951/*
1952 * Disable kmemleak. No memory allocation/freeing will be traced once this
1953 * function is called. Disabling kmemleak is an irreversible operation.
1954 */
1955static void kmemleak_disable(void)
1956{
1957        /* atomically check whether it was already invoked */
1958        if (cmpxchg(&kmemleak_error, 0, 1))
1959                return;
1960
1961        /* stop any memory operation tracing */
1962        kmemleak_enabled = 0;
1963
1964        /* check whether it is too early for a kernel thread */
1965        if (kmemleak_initialized)
1966                schedule_work(&cleanup_work);
1967        else
1968                kmemleak_free_enabled = 0;
1969
1970        pr_info("Kernel memory leak detector disabled\n");
1971}
1972
1973/*
1974 * Allow boot-time kmemleak disabling (enabled by default).
1975 */
1976static int __init kmemleak_boot_config(char *str)
1977{
1978        if (!str)
1979                return -EINVAL;
1980        if (strcmp(str, "off") == 0)
1981                kmemleak_disable();
1982        else if (strcmp(str, "on") == 0)
1983                kmemleak_skip_disable = 1;
1984        else
1985                return -EINVAL;
1986        return 0;
1987}
1988early_param("kmemleak", kmemleak_boot_config);
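
/*
 * For example, booting with "kmemleak=off" on the kernel command line
 * disables the leak detector irreversibly, while "kmemleak=on" makes
 * kmemleak_init() skip the automatic disabling performed when
 * CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is set (see below).
 */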
1989
1990static void __init print_log_trace(struct early_log *log)
1991{
1992        struct stack_trace trace;
1993
1994        trace.nr_entries = log->trace_len;
1995        trace.entries = log->trace;
1996
1997        pr_notice("Early log backtrace:\n");
1998        print_stack_trace(&trace, 2);
1999}
2000
2001/*
2002 * Kmemleak initialization.
2003 */
2004void __init kmemleak_init(void)
2005{
2006        int i;
2007        unsigned long flags;
2008
2009#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2010        if (!kmemleak_skip_disable) {
2011                kmemleak_early_log = 0;
2012                kmemleak_disable();
2013                return;
2014        }
2015#endif
2016
2017        jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
2018        jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
2019
2020        object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
2021        scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2022
2023        if (crt_early_log > ARRAY_SIZE(early_log))
2024                pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
2025                        crt_early_log);
2026
2027        /* the kernel is still in UP mode, so disabling the IRQs is enough */
2028        local_irq_save(flags);
2029        kmemleak_early_log = 0;
2030        if (kmemleak_error) {
2031                local_irq_restore(flags);
2032                return;
2033        } else {
2034                kmemleak_enabled = 1;
2035                kmemleak_free_enabled = 1;
2036        }
2037        local_irq_restore(flags);
2038
2039        /*
2040         * This is the point where tracking allocations is safe. Automatic
2041         * scanning is started during the late initcall. Add the early logged
2042         * callbacks to the kmemleak infrastructure.
2043         */
2044        for (i = 0; i < crt_early_log; i++) {
2045                struct early_log *log = &early_log[i];
2046
2047                switch (log->op_type) {
2048                case KMEMLEAK_ALLOC:
2049                        early_alloc(log);
2050                        break;
2051                case KMEMLEAK_ALLOC_PERCPU:
2052                        early_alloc_percpu(log);
2053                        break;
2054                case KMEMLEAK_FREE:
2055                        kmemleak_free(log->ptr);
2056                        break;
2057                case KMEMLEAK_FREE_PART:
2058                        kmemleak_free_part(log->ptr, log->size);
2059                        break;
2060                case KMEMLEAK_FREE_PERCPU:
2061                        kmemleak_free_percpu(log->ptr);
2062                        break;
2063                case KMEMLEAK_NOT_LEAK:
2064                        kmemleak_not_leak(log->ptr);
2065                        break;
2066                case KMEMLEAK_IGNORE:
2067                        kmemleak_ignore(log->ptr);
2068                        break;
2069                case KMEMLEAK_SCAN_AREA:
2070                        kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
2071                        break;
2072                case KMEMLEAK_NO_SCAN:
2073                        kmemleak_no_scan(log->ptr);
2074                        break;
2075                case KMEMLEAK_SET_EXCESS_REF:
2076                        object_set_excess_ref((unsigned long)log->ptr,
2077                                              log->excess_ref);
2078                        break;
2079                default:
2080                        kmemleak_warn("Unknown early log operation: %d\n",
2081                                      log->op_type);
2082                }
2083
2084                if (kmemleak_warning) {
2085                        print_log_trace(log);
2086                        kmemleak_warning = 0;
2087                }
2088        }
2089}
2090
2091/*
2092 * Late initialization function.
2093 */
2094static int __init kmemleak_late_init(void)
2095{
2096        struct dentry *dentry;
2097
2098        kmemleak_initialized = 1;
2099
2100        dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
2101                                     &kmemleak_fops);
2102        if (!dentry)
2103                pr_warn("Failed to create the debugfs kmemleak file\n");
2104
2105        if (kmemleak_error) {
2106                /*
2107                 * Some error occurred and kmemleak was disabled. There is a
2108                 * small chance that kmemleak_disable() was called immediately
2109                 * after setting kmemleak_initialized, in which case we may end
2110                 * up with two clean-up threads, serialized by the scan_mutex.
2111                 */
2112                schedule_work(&cleanup_work);
2113                return -ENOMEM;
2114        }
2115
2116        mutex_lock(&scan_mutex);
2117        start_scan_thread();
2118        mutex_unlock(&scan_mutex);
2119
2120        pr_info("Kernel memory leak detector initialized\n");
2121
2122        return 0;
2123}
2124late_initcall(kmemleak_late_init);
2125