// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback.
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer.
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
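
/*
 * Illustrative sketch (not part of this file's logic) of the nesting order
 * documented above, as it occurs when scan_object() walks a memory block and
 * scan_block() finds a pointer into another tracked object:
 *
 *	mutex_lock(&scan_mutex);
 *	raw_spin_lock_irqsave(&object->lock, flags);	// block being scanned
 *	raw_spin_lock(&kmemleak_lock);			// taken in scan_block()
 *	raw_spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 *	...						// update_refs(other_object)
 *	raw_spin_unlock(&other_object->lock);
 *	raw_spin_unlock(&kmemleak_lock);
 *	raw_spin_unlock_irqrestore(&object->lock, flags);
 *	mutex_unlock(&scan_mutex);
 */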

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE               16      /* stack trace length */
#define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
#define SECS_FIRST_SCAN         60      /* delay before the first scan */
#define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */
#define MAX_SCAN_SIZE           4096    /* maximum size of a scanned block */

#define BYTES_PER_POINTER       sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)  (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
                                           __GFP_NOLOCKDEP)) | \
                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
                                 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
        struct hlist_node node;
        unsigned long start;
        size_t size;
};

#define KMEMLEAK_GREY   0
#define KMEMLEAK_BLACK  -1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
        raw_spinlock_t lock;
        unsigned int flags;             /* object status flags */
        struct list_head object_list;
        struct list_head gray_list;
        struct rb_node rb_node;
        struct rcu_head rcu;            /* object_list lockless traversal */
        /* object usage count; object freed when use_count == 0 */
        atomic_t use_count;
        unsigned long pointer;
        size_t size;
        /* pass surplus references to this pointer */
        unsigned long excess_ref;
        /* minimum number of pointers found before it is considered a leak */
        int min_count;
        /* the total number of pointers found pointing to this object */
        int count;
        /* checksum for detecting modified objects */
        u32 checksum;
        /* memory ranges to be scanned inside an object (empty for all) */
        struct hlist_head area_list;
        unsigned long trace[MAX_TRACE];
        unsigned int trace_len;
        unsigned long jiffies;          /* creation timestamp */
        pid_t pid;                      /* pid of the current task */
        char comm[TASK_COMM_LEN];       /* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED        (1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED         (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN          (1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN        (1 << 3)

#define HEX_PREFIX              "    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE            16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE          1
/* include ASCII after the hex output */
#define HEX_ASCII               1
/* max number of lines to be printed */
#define HEX_MAX_LINES           2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* protecting the access to object_list and object_tree_root */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scans */
static unsigned long jiffies_scan_wait;
/* enables or disables task stack scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)     do {            \
        pr_warn(x);                             \
        dump_stack();                           \
        kmemleak_warning = 1;                   \
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)     do {    \
        kmemleak_warn(x);               \
        kmemleak_disable();             \
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)       do {    \
        if (seq)                                        \
                seq_printf(seq, fmt, ##__VA_ARGS__);    \
        else                                            \
                pr_warn(fmt, ##__VA_ARGS__);            \
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
                                 int rowsize, int groupsize, const void *buf,
                                 size_t len, bool ascii)
{
        if (seq)
                seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
                             buf, len, ascii);
        else
                print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
                               rowsize, groupsize, buf, len, ascii);
}

/*
 * Print the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
                            struct kmemleak_object *object)
{
        const u8 *ptr = (const u8 *)object->pointer;
        size_t len;

        /* limit the number of lines to HEX_MAX_LINES */
        len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

        warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
        kasan_disable_current();
        warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
                             HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
        kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not an orphan: either marked as a false positive (min_count == 0)
 *              or with sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *              (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
        return object->count != KMEMLEAK_BLACK &&
                object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
        return object->min_count != KMEMLEAK_BLACK &&
                object->count >= object->min_count;
}
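
/*
 * Worked example of the encoding above (illustrative): a kmalloc'ed object
 * is registered with min_count == 1 and has its count reset to 0 at the
 * start of each scan. Every pointer to it found while scanning increments
 * count:
 *
 *	count == 0, min_count == 1	-> white (candidate leak)
 *	count >= 1, min_count == 1	-> gray  (referenced, gets scanned)
 *	any count,  min_count == 0	-> gray  (marked as false positive)
 *	min_count == KMEMLEAK_BLACK	-> black (neither scanned nor reported)
 */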

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
        return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
                time_before_eq(object->jiffies + jiffies_min_age,
                               jiffies_last_scan);
}
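
/*
 * Example of the age filter (illustrative): with MSECS_MIN_AGE == 5000, an
 * object created at jiffies J is only reported once a scan starts with
 *
 *	J + jiffies_min_age <= jiffies_last_scan
 *
 * i.e. at least 5 seconds after the allocation, giving pointers that only
 * existed in CPU registers or on stacks time to land in scannable memory.
 */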

/*
 * Print the unreferenced object information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
                               struct kmemleak_object *object)
{
        int i;
        unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

        warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
                   object->pointer, object->size);
        warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
                   object->comm, object->pid, object->jiffies,
                   msecs_age / 1000, msecs_age % 1000);
        hex_dump_object(seq, object);
        warn_or_seq_printf(seq, "  backtrace:\n");

        for (i = 0; i < object->trace_len; i++) {
                void *ptr = (void *)object->trace[i];
                warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
        }
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
        pr_notice("Object 0x%08lx (size %zu):\n",
                  object->pointer, object->size);
        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
                  object->comm, object->pid, object->jiffies);
        pr_notice("  min_count = %d\n", object->min_count);
        pr_notice("  count = %d\n", object->count);
        pr_notice("  flags = 0x%x\n", object->flags);
        pr_notice("  checksum = %u\n", object->checksum);
        pr_notice("  backtrace:\n");
        stack_trace_print(object->trace, object->trace_len, 4);
}

/*
 * Look up the metadata (kmemleak_object) of a memory block in the object
 * search tree based on a pointer value. If alias is 0, only values pointing
 * to the beginning of the memory block are allowed. The kmemleak_lock must
 * be held when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
        struct rb_node *rb = object_tree_root.rb_node;

        while (rb) {
                struct kmemleak_object *object =
                        rb_entry(rb, struct kmemleak_object, rb_node);
                if (ptr < object->pointer)
                        rb = object->rb_node.rb_left;
                else if (object->pointer + object->size <= ptr)
                        rb = object->rb_node.rb_right;
                else if (object->pointer == ptr || alias)
                        return object;
                else {
                        kmemleak_warn("Found object by alias at 0x%08lx\n",
                                      ptr);
                        dump_object_info(object);
                        break;
                }
        }
        return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
        return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object;

        /* try the slab allocator first */
        if (object_cache) {
                object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
                if (object)
                        return object;
        }

        /* slab allocation failed, try the memory pool */
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        object = list_first_entry_or_null(&mem_pool_free_list,
                                          typeof(*object), object_list);
        if (object)
                list_del(&object->object_list);
        else if (mem_pool_free_count)
                object = &mem_pool[--mem_pool_free_count];
        else
                pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

        return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
        unsigned long flags;

        if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
                kmem_cache_free(object_cache, object);
                return;
        }

        /* add the object to the memory pool free list */
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        list_add(&object->object_list, &mem_pool_free_list);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
        struct hlist_node *tmp;
        struct kmemleak_scan_area *area;
        struct kmemleak_object *object =
                container_of(rcu, struct kmemleak_object, rcu);

        /*
         * Once use_count is 0 (guaranteed by put_object), there is no other
         * code accessing this object, hence no need for locking.
         */
        hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
                hlist_del(&area->node);
                kmem_cache_free(scan_area_cache, area);
        }
        mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
        if (!atomic_dec_and_test(&object->use_count))
                return;

        /* should only get here after delete_object was called */
        WARN_ON(object->flags & OBJECT_ALLOCATED);

        /*
         * It may be too early for the RCU callbacks, however, there is no
         * concurrent object_list traversal when !object_cache and all objects
         * came from the memory pool. Free the object directly.
         */
        if (object_cache)
                call_rcu(&object->rcu, free_object_rcu);
        else
                free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
        unsigned long flags;
        struct kmemleak_object *object;

        rcu_read_lock();
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

        /* check whether the object is still available */
        if (object && !get_object(object))
                object = NULL;
        rcu_read_unlock();

        return object;
}
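
/*
 * Typical lookup pattern (illustrative sketch of how the helpers above fit
 * together; "ptr" stands for a hypothetical tracked address):
 *
 *	struct kmemleak_object *object;
 *	unsigned long flags;
 *
 *	object = find_and_get_object(ptr, 0);	// use_count incremented
 *	if (object) {
 *		raw_spin_lock_irqsave(&object->lock, flags);
 *		...				// inspect/modify the metadata
 *		raw_spin_unlock_irqrestore(&object->lock, flags);
 *		put_object(object);		// may schedule the RCU freeing
 *	}
 */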

/*
 * Remove an object from the object_tree_root and object_list. Must be called
 * with the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
        rb_erase(&object->rb_node, &object_tree_root);
        list_del_rcu(&object->object_list);
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
        unsigned long flags;
        struct kmemleak_object *object;

        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
        if (object)
                __remove_object(object);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

        return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
        return stack_trace_save(trace, MAX_TRACE, 2);
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
                                             int min_count, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object, *parent;
        struct rb_node **link, *rb_parent;
        unsigned long untagged_ptr;

        object = mem_pool_alloc(gfp);
        if (!object) {
                pr_warn("Cannot allocate a kmemleak_object structure\n");
                kmemleak_disable();
                return NULL;
        }

        INIT_LIST_HEAD(&object->object_list);
        INIT_LIST_HEAD(&object->gray_list);
        INIT_HLIST_HEAD(&object->area_list);
        raw_spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
        object->flags = OBJECT_ALLOCATED;
        object->pointer = ptr;
        object->size = kfence_ksize((void *)ptr) ?: size;
        object->excess_ref = 0;
        object->min_count = min_count;
        object->count = 0;                      /* white color initially */
        object->jiffies = jiffies;
        object->checksum = 0;

        /* task information */
        if (in_hardirq()) {
                object->pid = 0;
                strncpy(object->comm, "hardirq", sizeof(object->comm));
        } else if (in_serving_softirq()) {
                object->pid = 0;
                strncpy(object->comm, "softirq", sizeof(object->comm));
        } else {
                object->pid = current->pid;
                /*
                 * There is a small chance of a race with set_task_comm(),
                 * however using get_task_comm() here may cause locking
                 * dependency issues with current->alloc_lock. In the worst
                 * case, the command line is not correct.
                 */
                strncpy(object->comm, current->comm, sizeof(object->comm));
        }

        /* kernel backtrace */
        object->trace_len = __save_stack_trace(object->trace);

        raw_spin_lock_irqsave(&kmemleak_lock, flags);

        untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
        min_addr = min(min_addr, untagged_ptr);
        max_addr = max(max_addr, untagged_ptr + size);
        link = &object_tree_root.rb_node;
        rb_parent = NULL;
        while (*link) {
                rb_parent = *link;
                parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
                if (ptr + size <= parent->pointer)
                        link = &parent->rb_node.rb_left;
                else if (parent->pointer + parent->size <= ptr)
                        link = &parent->rb_node.rb_right;
                else {
                        kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
                                      ptr);
                        /*
                         * No need for parent->lock here since "parent" cannot
                         * be freed while the kmemleak_lock is held.
                         */
                        dump_object_info(parent);
                        kmem_cache_free(object_cache, object);
                        object = NULL;
                        goto out;
                }
        }
        rb_link_node(&object->rb_node, rb_parent, link);
        rb_insert_color(&object->rb_node, &object_tree_root);

        list_add_tail_rcu(&object->object_list, &object_list);
out:
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
        return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
        unsigned long flags;

        WARN_ON(!(object->flags & OBJECT_ALLOCATED));
        WARN_ON(atomic_read(&object->use_count) < 1);

        /*
         * Locking here also ensures that the corresponding memory block
         * cannot be freed when it is being scanned.
         */
        raw_spin_lock_irqsave(&object->lock, flags);
        object->flags &= ~OBJECT_ALLOCATED;
        raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
        struct kmemleak_object *object;

        object = find_and_remove_object(ptr, 0);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
                              ptr);
#endif
                return;
        }
        __delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
        struct kmemleak_object *object;
        unsigned long start, end;

        object = find_and_remove_object(ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
                              ptr, size);
#endif
                return;
        }

        /*
         * Create one or two objects that may result from the memory block
         * split. Note that partial freeing is only done by free_bootmem() and
         * this happens before kmemleak_init() is called.
         */
        start = object->pointer;
        end = object->pointer + object->size;
        if (ptr > start)
                create_object(start, ptr - start, object->min_count,
                              GFP_KERNEL);
        if (ptr + size < end)
                create_object(ptr + size, end - ptr - size, object->min_count,
                              GFP_KERNEL);

        __delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
        object->min_count = color;
        if (color == KMEMLEAK_BLACK)
                object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&object->lock, flags);
        __paint_it(object, color);
        raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
                              ptr,
                              (color == KMEMLEAK_GREY) ? "Grey" :
                              (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
                return;
        }
        paint_it(object, color);
        put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area = NULL;

        object = find_and_get_object(ptr, 1);
        if (!object) {
                kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        if (scan_area_cache)
                area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

        raw_spin_lock_irqsave(&object->lock, flags);
        if (!area) {
                pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
                /* mark the object for full scan to avoid false positives */
                object->flags |= OBJECT_FULL_SCAN;
                goto out_unlock;
        }
        if (size == SIZE_MAX) {
                size = object->pointer + object->size - ptr;
        } else if (ptr + size > object->pointer + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
                goto out_unlock;
        }

        INIT_HLIST_NODE(&area->node);
        area->start = ptr;
        area->size = size;

        hlist_add_head(&area->node, &object->area_list);
out_unlock:
        raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        raw_spin_lock_irqsave(&object->lock, flags);
        object->excess_ref = excess_ref;
        raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references
 * to it are still searched for.
 */
static void object_no_scan(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
                return;
        }

        raw_spin_lock_irqsave(&object->lock, flags);
        object->flags |= OBJECT_NO_SCAN;
        raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:        pointer to beginning of the object
 * @size:       size of the object
 * @min_count:  minimum number of references to this object. If during memory
 *              scanning a number of references less than @min_count is found,
 *              the object is reported as a memory leak. If @min_count is 0,
 *              the object is never reported as a leak. If @min_count is -1,
 *              the object is ignored (not scanned and not reported as a leak)
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
                          gfp_t gfp)
{
        pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
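
/*
 * Example (illustrative, hypothetical caller): the slab and page allocators
 * invoke the hook above internally, so explicit registration is only needed
 * for memory handed out by a custom allocator, e.g. a genalloc pool:
 *
 *	unsigned long addr = gen_pool_alloc(pool, size);
 *
 *	if (addr)
 *		kmemleak_alloc((void *)addr, size, 1, GFP_KERNEL);
 */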

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 * @size:       size of the object
 * @gfp:        flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
                                 gfp_t gfp)
{
        unsigned int cpu;

        pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

        /*
         * Percpu allocations are only scanned and not reported as leaks
         * (min_count is set to 0).
         */
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        create_object((unsigned long)per_cpu_ptr(ptr, cpu),
                                      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:       pointer to vm_struct
 * @size:       size of the object
 * @gfp:        __vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

        /*
         * A min_count = 2 is needed because vm_struct contains a reference to
         * the virtual address of the vmalloc'ed block.
         */
        if (kmemleak_enabled) {
                create_object((unsigned long)area->addr, size, 2, gfp);
                object_set_excess_ref((unsigned long)area,
                                      (unsigned long)area->addr);
        }
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
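
/*
 * Why min_count == 2 above (illustrative): the block is always referenced
 * once by area->addr inside its own vm_struct, so a single reference proves
 * nothing about reachability. The excess_ref link additionally lets a
 * pointer to the vm_struct stand in for a pointer to the block, e.g. for
 * vmapped kernel stacks (see free_thread_stack()):
 *
 *	tsk->stack_vm_area = area;	// surplus ref to area is passed on
 *					// to area->addr, keeping it gray
 */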

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:        pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:        pointer to the beginning or inside the object. This also
 *              represents the start of the range to be freed
 * @size:       size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                delete_object_part((unsigned long)ptr, size);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
        unsigned int cpu;

        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        delete_object_full((unsigned long)per_cpu_ptr(ptr,
                                                                      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:        pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
        struct kmemleak_object *object;
        unsigned long flags;

        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
                return;

        object = find_and_get_object((unsigned long)ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Updating stack trace for unknown object at %p\n",
                              ptr);
#endif
                return;
        }

        raw_spin_lock_irqsave(&object->lock, flags);
        object->trace_len = __save_stack_trace(object->trace);
        raw_spin_unlock_irqrestore(&object->lock, flags);

        put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);
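
/*
 * Example (illustrative, hypothetical driver code): an object whose only
 * reference lives somewhere kmemleak cannot scan, e.g. device registers,
 * shows up as a false positive and can be annotated away:
 *
 *	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
 *	writeq(virt_to_phys(priv), ioaddr + DESC_BASE);	// only reference
 *	kmemleak_not_leak(priv);	// still scanned, never reported
 */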

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_black_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:        pointer to beginning or inside the object. This also
 *              represents the start of the scan area
 * @size:       size of the scan area
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
                add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
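
/*
 * Example (illustrative, hypothetical structure): a large buffer whose
 * pointers all sit in a small header; scanning only the header stops random
 * payload bytes from being misread as references:
 *
 *	buf = kmalloc(SZ_64K, GFP_KERNEL);
 *	kmemleak_scan_area(buf, sizeof(struct buf_header), GFP_KERNEL);
 */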

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);
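
/*
 * Example (illustrative, hypothetical driver code): a receive buffer holds
 * packet payload only, never kernel pointers, so there is nothing useful to
 * follow inside it:
 *
 *	rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
 *	kmemleak_no_scan(rx_buf);	// data only, no references to chase
 */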

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *                       address argument
 * @phys:       physical address of the object
 * @size:       size of the object
 * @min_count:  minimum number of references to this object.
 *              See kmemleak_alloc()
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
                               gfp_t gfp)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *                           physical address argument
 * @phys:       physical address of the beginning or inside an object. This
 *              also represents the start of the range to be freed
 * @size:       size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *                          address argument
 * @phys:       physical address of the object
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *                        address argument
 * @phys:       physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
        u32 old_csum = object->checksum;

        kasan_disable_current();
        kcsan_disable_current();
        object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
        kasan_enable_current();
        kcsan_enable_current();

        return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
        if (!color_white(object)) {
                /* non-orphan, ignored or new */
                return;
        }

        /*
         * Increase the object's reference count (number of pointers to the
         * memory block). If this count reaches the required minimum, the
         * object's color will become gray and it will be added to the
         * gray_list.
         */
        object->count++;
        if (color_gray(object)) {
                /* put_object() called when removing from gray_list */
                WARN_ON(!get_object(object));
                list_add_tail(&object->gray_list, &gray_list);
        }
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
        if (!kmemleak_enabled)
                return 1;

        /*
         * This function may be called from either process or kthread context,
         * hence the need to check for both stop conditions.
         */
        if (current->mm)
                return signal_pending(current);
        else
                return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
                       struct kmemleak_object *scanned)
{
        unsigned long *ptr;
        unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
        unsigned long *end = _end - (BYTES_PER_POINTER - 1);
        unsigned long flags;
        unsigned long untagged_ptr;

        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        for (ptr = start; ptr < end; ptr++) {
                struct kmemleak_object *object;
                unsigned long pointer;
                unsigned long excess_ref;

                if (scan_should_stop())
                        break;

                kasan_disable_current();
                pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
                kasan_enable_current();

                untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
                if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
                        continue;

                /*
                 * No need for get_object() here since we hold kmemleak_lock.
                 * object->use_count cannot be dropped to 0 while the object
                 * is still present in object_tree_root and object_list
                 * (with updates protected by kmemleak_lock).
                 */
                object = lookup_object(pointer, 1);
                if (!object)
                        continue;
                if (object == scanned)
                        /* self referenced, ignore */
                        continue;

                /*
                 * Avoid the lockdep recursive warning on object->lock being
                 * previously acquired in scan_object(). These locks are
                 * enclosed by scan_mutex.
                 */
                raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                /* only pass surplus references (object already gray) */
                if (color_gray(object)) {
                        excess_ref = object->excess_ref;
                        /* no need for update_refs() if object already gray */
                } else {
                        excess_ref = 0;
                        update_refs(object);
                }
                raw_spin_unlock(&object->lock);

                if (excess_ref) {
                        object = lookup_object(excess_ref, 0);
                        if (!object)
                                continue;
                        if (object == scanned)
                                /* circular reference, ignore */
                                continue;
                        raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                        update_refs(object);
                        raw_spin_unlock(&object->lock);
                }
        }
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}
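
/*
 * Illustrative example of what scan_block() counts as a reference: any
 * pointer-aligned word in a scanned range whose value falls within a tracked
 * block. Given a hypothetical:
 *
 *	struct foo { struct bar *b; u64 payload; };
 *
 *	f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	f->b = kmalloc(sizeof(*f->b), GFP_KERNEL);
 *
 * scanning f's block finds the word f->b pointing into b's block and
 * increments b's count; a payload value that merely happens to equal a
 * tracked address is counted the same way (a false reference, hence a
 * possible false negative).
 */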

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
        void *next;

        while (start < end) {
                next = min(start + MAX_SCAN_SIZE, end);
                scan_block(start, next, NULL);
                start = next;
                cond_resched();
        }
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
        struct kmemleak_scan_area *area;
        unsigned long flags;

        /*
         * Once the object->lock is acquired, the corresponding memory block
         * cannot be freed (the same lock is acquired in delete_object).
         */
        raw_spin_lock_irqsave(&object->lock, flags);
        if (object->flags & OBJECT_NO_SCAN)
                goto out;
        if (!(object->flags & OBJECT_ALLOCATED))
                /* already freed object */
                goto out;
        if (hlist_empty(&object->area_list) ||
            object->flags & OBJECT_FULL_SCAN) {
                void *start = (void *)object->pointer;
                void *end = (void *)(object->pointer + object->size);
                void *next;

                do {
                        next = min(start + MAX_SCAN_SIZE, end);
                        scan_block(start, next, object);

                        start = next;
                        if (start >= end)
                                break;

                        raw_spin_unlock_irqrestore(&object->lock, flags);
                        cond_resched();
                        raw_spin_lock_irqsave(&object->lock, flags);
                } while (object->flags & OBJECT_ALLOCATED);
        } else
                hlist_for_each_entry(area, &object->area_list, node)
                        scan_block((void *)area->start,
                                   (void *)(area->start + area->size),
                                   object);
out:
        raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
        struct kmemleak_object *object, *tmp;

        /*
         * The list traversal is safe for both tail additions and removals
         * from inside the loop. The kmemleak objects cannot be freed from
         * outside the loop because their use_count was incremented.
         */
        object = list_entry(gray_list.next, typeof(*object), gray_list);
        while (&object->gray_list != &gray_list) {
                cond_resched();

                /* may add new objects to the list */
                if (!scan_should_stop())
                        scan_object(object);

                tmp = list_entry(object->gray_list.next, typeof(*object),
                                 gray_list);

                /* remove the object from the list and release it */
                list_del(&object->gray_list);
                put_object(object);

                object = tmp;
        }
        WARN_ON(!list_empty(&gray_list));
}
1396
1397/*
1398 * Scan data sections and all the referenced memory blocks allocated via the
1399 * kernel's standard allocators. This function must be called with the
1400 * scan_mutex held.
1401 */
1402static void kmemleak_scan(void)
1403{
1404        unsigned long flags;
1405        struct kmemleak_object *object;
1406        int i;
1407        int new_leaks = 0;
1408
1409        jiffies_last_scan = jiffies;
1410
1411        /* prepare the kmemleak_object's */
1412        rcu_read_lock();
1413        list_for_each_entry_rcu(object, &object_list, object_list) {
1414                raw_spin_lock_irqsave(&object->lock, flags);
1415#ifdef DEBUG
1416                /*
1417                 * With a few exceptions there should be a maximum of
1418                 * 1 reference to any object at this point.
1419                 */
1420                if (atomic_read(&object->use_count) > 1) {
1421                        pr_debug("object->use_count = %d\n",
1422                                 atomic_read(&object->use_count));
1423                        dump_object_info(object);
1424                }
1425#endif
1426                /* reset the reference count (whiten the object) */
1427                object->count = 0;
1428                if (color_gray(object) && get_object(object))
1429                        list_add_tail(&object->gray_list, &gray_list);
1430
1431                raw_spin_unlock_irqrestore(&object->lock, flags);
1432        }
1433        rcu_read_unlock();
1434
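        /*
         * From here the scan follows a tri-colour scheme: white objects
         * (count < min_count) are leak candidates, gray objects are known to
         * be referenced and are queued on gray_list for scanning, and already
         * scanned objects are effectively black. scan_block() turns white
         * objects gray as pointers into them are found.
         */
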
1435#ifdef CONFIG_SMP
1436        /* per-cpu sections scanning */
1437        for_each_possible_cpu(i)
1438                scan_large_block(__per_cpu_start + per_cpu_offset(i),
1439                                 __per_cpu_end + per_cpu_offset(i));
1440#endif
1441
1442        /*
1443         * Struct page scanning for each node.
1444         */
1445        get_online_mems();
1446        for_each_online_node(i) {
1447                unsigned long start_pfn = node_start_pfn(i);
1448                unsigned long end_pfn = node_end_pfn(i);
1449                unsigned long pfn;
1450
1451                for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1452                        struct page *page = pfn_to_online_page(pfn);
1453
1454                        if (!page)
1455                                continue;
1456
1457                        /* only scan pages belonging to this node */
1458                        if (page_to_nid(page) != i)
1459                                continue;
1460                        /* only scan if page is in use */
1461                        if (page_count(page) == 0)
1462                                continue;
1463                        scan_block(page, page + 1, NULL);
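                        /* limit scan latency: reschedule every 64 pages */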
1464                        if (!(pfn & 63))
1465                                cond_resched();
1466                }
1467        }
1468        put_online_mems();
1469
1470        /*
1471         * Scanning the task stacks (may introduce false negatives).
1472         */
1473        if (kmemleak_stack_scan) {
1474                struct task_struct *p, *g;
1475
1476                rcu_read_lock();
1477                for_each_process_thread(g, p) {
1478                        void *stack = try_get_task_stack(p);
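
                        /* where supported, the stack is now pinned and cannot be freed mid-scan */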
1479                        if (stack) {
1480                                scan_block(stack, stack + THREAD_SIZE, NULL);
1481                                put_task_stack(p);
1482                        }
1483                }
1484                rcu_read_unlock();
1485        }
1486
1487        /*
1488         * Scan the objects already referenced from the sections scanned
1489         * above.
1490         */
1491        scan_gray_list();
1492
1493        /*
1494         * Check for new or unreferenced objects modified since the previous
1495         * scan and color them gray until the next scan.
1496         */
1497        rcu_read_lock();
1498        list_for_each_entry_rcu(object, &object_list, object_list) {
1499                raw_spin_lock_irqsave(&object->lock, flags);
1500                if (color_white(object) && (object->flags & OBJECT_ALLOCATED) &&
1501                    update_checksum(object) && get_object(object)) {
1502                        /* color it gray temporarily */
1503                        object->count = object->min_count;
1504                        list_add_tail(&object->gray_list, &gray_list);
1505                }
1506                raw_spin_unlock_irqrestore(&object->lock, flags);
1507        }
1508        rcu_read_unlock();
1509
1510        /*
1511         * Re-scan the gray list for modified unreferenced objects.
1512         */
1513        scan_gray_list();
1514
1515        /*
1516         * If scanning was stopped do not report any new unreferenced objects.
1517         */
1518        if (scan_should_stop())
1519                return;
1520
1521        /*
1522         * Scanning result reporting.
1523         */
1524        rcu_read_lock();
1525        list_for_each_entry_rcu(object, &object_list, object_list) {
1526                raw_spin_lock_irqsave(&object->lock, flags);
1527                if (unreferenced_object(object) &&
1528                    !(object->flags & OBJECT_REPORTED)) {
1529                        object->flags |= OBJECT_REPORTED;
1530
1531                        if (kmemleak_verbose)
1532                                print_unreferenced(NULL, object);
1533
1534                        new_leaks++;
1535                }
1536                raw_spin_unlock_irqrestore(&object->lock, flags);
1537        }
1538        rcu_read_unlock();
1539
1540        if (new_leaks) {
1541                kmemleak_found_leaks = true;
1542
1543                pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1544                        new_leaks);
1545        }
1546
1547}
1548
1549/*
1550 * Thread function performing automatic memory scanning. Unreferenced objects
1551 * found at the end of a memory scan are reported, but only the first time.
1552 */
1553static int kmemleak_scan_thread(void *arg)
1554{
1555        static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1556
1557        pr_info("Automatic memory scanning thread started\n");
1558        set_user_nice(current, 10);
1559
1560        /*
1561         * Wait before the first scan to allow the system to fully initialize.
1562         */
1563        if (first_run) {
1564                signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1565                first_run = 0;
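
                /*
                 * schedule_timeout_interruptible() returns the remaining
                 * jiffies on early wakeup, so keep waiting until the full
                 * delay elapses or the thread is asked to stop.
                 */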
1566                while (timeout && !kthread_should_stop())
1567                        timeout = schedule_timeout_interruptible(timeout);
1568        }
1569
1570        while (!kthread_should_stop()) {
1571                signed long timeout = READ_ONCE(jiffies_scan_wait);
1572
1573                mutex_lock(&scan_mutex);
1574                kmemleak_scan();
1575                mutex_unlock(&scan_mutex);
1576
1577                /* wait before the next scan */
1578                while (timeout && !kthread_should_stop())
1579                        timeout = schedule_timeout_interruptible(timeout);
1580        }
1581
1582        pr_info("Automatic memory scanning thread ended\n");
1583
1584        return 0;
1585}
1586
1587/*
1588 * Start the automatic memory scanning thread. This function must be called
1589 * with the scan_mutex held.
1590 */
1591static void start_scan_thread(void)
1592{
1593        if (scan_thread)
1594                return;
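        /* kthread_run() returns an ERR_PTR() on failure, never NULL */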
1595        scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1596        if (IS_ERR(scan_thread)) {
1597                pr_warn("Failed to create the scan thread\n");
1598                scan_thread = NULL;
1599        }
1600}
1601
1602/*
1603 * Stop the automatic memory scanning thread.
1604 */
1605static void stop_scan_thread(void)
1606{
1607        if (scan_thread) {
1608                kthread_stop(scan_thread);
1609                scan_thread = NULL;
1610        }
1611}
1612
1613/*
1614 * Iterate over the object_list and return the first valid object at or after
1615 * the required position with its use_count incremented. Note that reading
1616 * the file does not trigger a scan; one is requested by writing "scan".
1617 */
1618static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1619{
1620        struct kmemleak_object *object;
1621        loff_t n = *pos;
1622        int err;
1623
1624        err = mutex_lock_interruptible(&scan_mutex);
1625        if (err < 0)
1626                return ERR_PTR(err);
1627
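        /* dropped in kmemleak_seq_stop() together with scan_mutex */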
1628        rcu_read_lock();
1629        list_for_each_entry_rcu(object, &object_list, object_list) {
1630                if (n-- > 0)
1631                        continue;
1632                if (get_object(object))
1633                        goto out;
1634        }
1635        object = NULL;
1636out:
1637        return object;
1638}
1639
1640/*
1641 * Return the next object in the object_list. The function decrements the
1642 * use_count of the previous object and increases that of the next one.
1643 */
1644static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1645{
1646        struct kmemleak_object *prev_obj = v;
1647        struct kmemleak_object *next_obj = NULL;
1648        struct kmemleak_object *obj = prev_obj;
1649
1650        ++(*pos);
1651
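        /* the RCU read lock taken in kmemleak_seq_start() is still held */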
1652        list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1653                if (get_object(obj)) {
1654                        next_obj = obj;
1655                        break;
1656                }
1657        }
1658
1659        put_object(prev_obj);
1660        return next_obj;
1661}
1662
1663/*
1664 * Decrement the use_count of the last object returned, if any, and unlock.
1665 */
1666static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1667{
1668        if (!IS_ERR(v)) {
1669                /*
1670                 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1671                 * waiting was interrupted, so only release it if !IS_ERR.
1672                 */
1673                rcu_read_unlock();
1674                mutex_unlock(&scan_mutex);
1675                if (v)
1676                        put_object(v);
1677        }
1678}
1679
1680/*
1681 * Print the information for an unreferenced object to the seq file.
1682 */
1683static int kmemleak_seq_show(struct seq_file *seq, void *v)
1684{
1685        struct kmemleak_object *object = v;
1686        unsigned long flags;
1687
1688        raw_spin_lock_irqsave(&object->lock, flags);
1689        if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1690                print_unreferenced(seq, object);
1691        raw_spin_unlock_irqrestore(&object->lock, flags);
1692        return 0;
1693}
1694
1695static const struct seq_operations kmemleak_seq_ops = {
1696        .start = kmemleak_seq_start,
1697        .next  = kmemleak_seq_next,
1698        .stop  = kmemleak_seq_stop,
1699        .show  = kmemleak_seq_show,
1700};
1701
1702static int kmemleak_open(struct inode *inode, struct file *file)
1703{
1704        return seq_open(file, &kmemleak_seq_ops);
1705}
1706
1707static int dump_str_object_info(const char *str)
1708{
1709        unsigned long flags;
1710        struct kmemleak_object *object;
1711        unsigned long addr;
1712
1713        if (kstrtoul(str, 0, &addr))
1714                return -EINVAL;
1715        object = find_and_get_object(addr, 0);
1716        if (!object) {
1717                pr_info("Unknown object at 0x%08lx\n", addr);
1718                return -EINVAL;
1719        }
1720
1721        raw_spin_lock_irqsave(&object->lock, flags);
1722        dump_object_info(object);
1723        raw_spin_unlock_irqrestore(&object->lock, flags);
1724
1725        put_object(object);
1726        return 0;
1727}
1728
1729/*
1730 * We use grey instead of black to ensure we can do future scans on the same
1731 * objects. If we did not scan them again, such black objects could contain
1732 * references to objects allocated later and we would end up with false
1733 * positives.
1734 */
1735static void kmemleak_clear(void)
1736{
1737        struct kmemleak_object *object;
1738        unsigned long flags;
1739
1740        rcu_read_lock();
1741        list_for_each_entry_rcu(object, &object_list, object_list) {
1742                raw_spin_lock_irqsave(&object->lock, flags);
1743                if ((object->flags & OBJECT_REPORTED) &&
1744                    unreferenced_object(object))
1745                        __paint_it(object, KMEMLEAK_GREY);
1746                raw_spin_unlock_irqrestore(&object->lock, flags);
1747        }
1748        rcu_read_unlock();
1749
1750        kmemleak_found_leaks = false;
1751}
1752
1753static void __kmemleak_do_cleanup(void);
1754
1755/*
1756 * File write operation to configure kmemleak at run-time. The following
1757 * commands can be written to the /sys/kernel/debug/kmemleak file:
1758 *   off        - disable kmemleak (irreversible)
1759 *   stack=on   - enable the task stacks scanning
1760 *   stack=off  - disable the task stacks scanning
1761 *   scan=on    - start the automatic memory scanning thread
1762 *   scan=off   - stop the automatic memory scanning thread
1763 *   scan=...   - set the automatic memory scanning period in seconds (0 to
1764 *                disable it)
1765 *   scan       - trigger a memory scan
1766 *   clear      - mark all currently reported unreferenced kmemleak objects
1767 *                as grey so they are no longer printed, or free all kmemleak
1768 *                objects if kmemleak has been disabled.
1769 *   dump=...   - dump information about the object found at the given address
1770 */
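/*
 * For example, to trigger an immediate scan and read the results from a
 * shell (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo scan > /sys/kernel/debug/kmemleak
 *   cat /sys/kernel/debug/kmemleak
 *
 * and to set the automatic scanning period to ten minutes:
 *
 *   echo scan=600 > /sys/kernel/debug/kmemleak
 */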
1771static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1772                              size_t size, loff_t *ppos)
1773{
1774        char buf[64];
1775        int buf_size;
1776        int ret;
1777
1778        buf_size = min(size, (sizeof(buf) - 1));
1779        if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1780                return -EFAULT;
1781        buf[buf_size] = 0;
1782
1783        ret = mutex_lock_interruptible(&scan_mutex);
1784        if (ret < 0)
1785                return ret;
1786
1787        if (strncmp(buf, "clear", 5) == 0) {
1788                if (kmemleak_enabled)
1789                        kmemleak_clear();
1790                else
1791                        __kmemleak_do_cleanup();
1792                goto out;
1793        }
1794
1795        if (!kmemleak_enabled) {
1796                ret = -EPERM;
1797                goto out;
1798        }
1799
1800        if (strncmp(buf, "off", 3) == 0)
1801                kmemleak_disable();
1802        else if (strncmp(buf, "stack=on", 8) == 0)
1803                kmemleak_stack_scan = 1;
1804        else if (strncmp(buf, "stack=off", 9) == 0)
1805                kmemleak_stack_scan = 0;
1806        else if (strncmp(buf, "scan=on", 7) == 0)
1807                start_scan_thread();
1808        else if (strncmp(buf, "scan=off", 8) == 0)
1809                stop_scan_thread();
1810        else if (strncmp(buf, "scan=", 5) == 0) {
1811                unsigned int secs;
1812                unsigned long msecs;
1813
1814                ret = kstrtouint(buf + 5, 0, &secs);
1815                if (ret < 0)
1816                        goto out;
1817
1818                msecs = secs * MSEC_PER_SEC;
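                /* msecs_to_jiffies() takes an unsigned int, clamp the value */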
1819                if (msecs > UINT_MAX)
1820                        msecs = UINT_MAX;
1821
1822                stop_scan_thread();
1823                if (msecs) {
1824                        WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
1825                        start_scan_thread();
1826                }
1827        } else if (strncmp(buf, "scan", 4) == 0)
1828                kmemleak_scan();
1829        else if (strncmp(buf, "dump=", 5) == 0)
1830                ret = dump_str_object_info(buf + 5);
1831        else
1832                ret = -EINVAL;
1833
1834out:
1835        mutex_unlock(&scan_mutex);
1836        if (ret < 0)
1837                return ret;
1838
1839        /* ignore the rest of the buffer, only one command at a time */
1840        *ppos += size;
1841        return size;
1842}
1843
1844static const struct file_operations kmemleak_fops = {
1845        .owner          = THIS_MODULE,
1846        .open           = kmemleak_open,
1847        .read           = seq_read,
1848        .write          = kmemleak_write,
1849        .llseek         = seq_lseek,
1850        .release        = seq_release,
1851};
1852
1853static void __kmemleak_do_cleanup(void)
1854{
1855        struct kmemleak_object *object, *tmp;
1856
1857        /*
1858         * Kmemleak has already been disabled, no need for RCU list traversal
1859         * or kmemleak_lock held.
1860         */
1861        list_for_each_entry_safe(object, tmp, &object_list, object_list) {
1862                __remove_object(object);
1863                __delete_object(object);
1864        }
1865}
1866
1867/*
1868 * Stop the memory scanning thread and free the kmemleak internal objects if
1869 * no memory leaks were found (otherwise, kmemleak may still hold useful
1870 * information on the reported leaks).
1871 */
1872static void kmemleak_do_cleanup(struct work_struct *work)
1873{
1874        stop_scan_thread();
1875
1876        mutex_lock(&scan_mutex);
1877        /*
1878         * Once it is made sure that kmemleak_scan has stopped, it is safe to no
1879         * longer track object freeing. Ordering of the scan thread stopping and
1880         * the memory accesses below is guaranteed by the kthread_stop()
1881         * function.
1882         */
1883        kmemleak_free_enabled = 0;
1884        mutex_unlock(&scan_mutex);
1885
1886        if (!kmemleak_found_leaks)
1887                __kmemleak_do_cleanup();
1888        else
1889                pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
1890}
1891
1892static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1893
1894/*
1895 * Disable kmemleak. No memory allocation/freeing will be traced once this
1896 * function is called. Disabling kmemleak is an irreversible operation.
1897 */
1898static void kmemleak_disable(void)
1899{
1900        /* atomic run-once check: only the first caller sees the old value 0 */
1901        if (cmpxchg(&kmemleak_error, 0, 1))
1902                return;
1903
1904        /* stop any memory operation tracing */
1905        kmemleak_enabled = 0;
1906
1907        /* check whether it is too early for a kernel thread */
1908        if (kmemleak_initialized)
1909                schedule_work(&cleanup_work);
1910        else
1911                kmemleak_free_enabled = 0;
1912
1913        pr_info("Kernel memory leak detector disabled\n");
1914}
1915
1916/*
1917 * Allow boot-time kmemleak disabling ("kmemleak=off") or enabling ("kmemleak=on").
1918 */
1919static int __init kmemleak_boot_config(char *str)
1920{
1921        if (!str)
1922                return -EINVAL;
1923        if (strcmp(str, "off") == 0)
1924                kmemleak_disable();
1925        else if (strcmp(str, "on") == 0)
1926                kmemleak_skip_disable = 1;
1927        else
1928                return -EINVAL;
1929        return 0;
1930}
1931early_param("kmemleak", kmemleak_boot_config);
1932
1933/*
1934 * Kmemleak initialization.
1935 */
1936void __init kmemleak_init(void)
1937{
1938#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1939        if (!kmemleak_skip_disable) {
1940                kmemleak_disable();
1941                return;
1942        }
1943#endif
1944
1945        if (kmemleak_error)
1946                return;
1947
1948        jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1949        jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1950
1951        object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1952        scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1953
1954        /* register the data/bss sections as gray (scanned, never reported) */
1955        create_object((unsigned long)_sdata, _edata - _sdata,
1956                      KMEMLEAK_GREY, GFP_ATOMIC);
1957        create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
1958                      KMEMLEAK_GREY, GFP_ATOMIC);
1959        /* only register .data..ro_after_init if not within .data */
1960        if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
1961                create_object((unsigned long)__start_ro_after_init,
1962                              __end_ro_after_init - __start_ro_after_init,
1963                              KMEMLEAK_GREY, GFP_ATOMIC);
1964}
1965
1966/*
1967 * Late initialization function.
1968 */
1969static int __init kmemleak_late_init(void)
1970{
1971        kmemleak_initialized = 1;
1972
1973        debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
1974
1975        if (kmemleak_error) {
1976                /*
1977                 * Some error occurred and kmemleak was disabled. There is a
1978                 * small chance that kmemleak_disable() was called immediately
1979                 * after setting kmemleak_initialized and we may end up with
1980                 * two clean-up threads but serialized by scan_mutex.
1981                 */
1982                schedule_work(&cleanup_work);
1983                return -ENOMEM;
1984        }
1985
1986        if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
1987                mutex_lock(&scan_mutex);
1988                start_scan_thread();
1989                mutex_unlock(&scan_mutex);
1990        }
1991
1992        pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
1993                mem_pool_free_count);
1994
1995        return 0;
1996}
1997late_initcall(kmemleak_late_init);
1998