/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block.  The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE               16      /* stack trace length */
#define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
#define SECS_FIRST_SCAN         60      /* delay before the first scan */
#define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */
#define MAX_SCAN_SIZE           4096    /* maximum size of a scanned block */

#define BYTES_PER_POINTER       sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)  (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
                                 __GFP_NOWARN)
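
/*
 * Worked example of the mask above: a caller passing
 * gfp == GFP_KERNEL | __GFP_ZERO has its metadata allocated with
 * GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, i.e. only
 * the GFP_KERNEL/GFP_ATOMIC bits of the caller's flags are preserved and
 * the internal allocation fails quietly instead of retrying or warning.
 */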

/* scanning area inside a memory block */
struct kmemleak_scan_area {
        struct hlist_node node;
        unsigned long start;
        size_t size;
};

#define KMEMLEAK_GREY   0
#define KMEMLEAK_BLACK  -1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
        spinlock_t lock;
        unsigned long flags;            /* object status flags */
        struct list_head object_list;
        struct list_head gray_list;
        struct rb_node rb_node;
        struct rcu_head rcu;            /* object_list lockless traversal */
        /* object usage count; object freed when use_count == 0 */
        atomic_t use_count;
        unsigned long pointer;
        size_t size;
        /* minimum number of pointers found before it is considered a leak */
        int min_count;
        /* the total number of pointers found pointing to this object */
        int count;
        /* checksum for detecting modified objects */
        u32 checksum;
        /* memory ranges to be scanned inside an object (empty for all) */
        struct hlist_head area_list;
        unsigned long trace[MAX_TRACE];
        unsigned int trace_len;
        unsigned long jiffies;          /* creation timestamp */
        pid_t pid;                      /* pid of the current task */
        char comm[TASK_COMM_LEN];       /* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED        (1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED         (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN          (1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE            16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE          1
/* include ASCII after the hex output */
#define HEX_ASCII               1
/* max number of lines to be printed */
#define HEX_MAX_LINES           2
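
/*
 * With the values above, hex_dump_object() below prints at most
 * HEX_MAX_LINES * HEX_ROW_SIZE == 2 * 16 == 32 bytes of an object's data.
 */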

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
        KMEMLEAK_ALLOC,
        KMEMLEAK_ALLOC_PERCPU,
        KMEMLEAK_FREE,
        KMEMLEAK_FREE_PART,
        KMEMLEAK_FREE_PERCPU,
        KMEMLEAK_NOT_LEAK,
        KMEMLEAK_IGNORE,
        KMEMLEAK_SCAN_AREA,
        KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
        int op_type;                    /* kmemleak operation type */
        const void *ptr;                /* allocated/freed memory block */
        size_t size;                    /* memory block size */
        int min_count;                  /* minimum reference count */
        unsigned long trace[MAX_TRACE]; /* stack trace */
        unsigned int trace_len;         /* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
        early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)     do {            \
        pr_warn(x);                             \
        dump_stack();                           \
        kmemleak_warning = 1;                   \
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)     do {    \
        kmemleak_warn(x);               \
        kmemleak_disable();             \
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
                            struct kmemleak_object *object)
{
        const u8 *ptr = (const u8 *)object->pointer;
        size_t len;

        /* limit the number of lines to HEX_MAX_LINES */
        len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

        seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
        kasan_disable_current();
        seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
                     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
        kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *              sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *              (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
        return object->count != KMEMLEAK_BLACK &&
                object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
        return object->min_count != KMEMLEAK_BLACK &&
                object->count >= object->min_count;
}
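
/*
 * Worked example of the color encoding: a block registered with
 * min_count == 1 starts each scan white (count is reset to 0). If the scan
 * then finds a single pointer to it, count becomes 1 >= min_count, so
 * color_gray() is true and the object is queued on the gray_list rather
 * than reported.
 */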

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
        return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
                time_before_eq(object->jiffies + jiffies_min_age,
                               jiffies_last_scan);
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
                               struct kmemleak_object *object)
{
        int i;
        unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

        seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
                   object->pointer, object->size);
        seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
                   object->comm, object->pid, object->jiffies,
                   msecs_age / 1000, msecs_age % 1000);
        hex_dump_object(seq, object);
        seq_printf(seq, "  backtrace:\n");

        for (i = 0; i < object->trace_len; i++) {
                void *ptr = (void *)object->trace[i];
                seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
        }
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
        struct stack_trace trace;

        trace.nr_entries = object->trace_len;
        trace.entries = object->trace;

        pr_notice("Object 0x%08lx (size %zu):\n",
                  object->pointer, object->size);
        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
                  object->comm, object->pid, object->jiffies);
        pr_notice("  min_count = %d\n", object->min_count);
        pr_notice("  count = %d\n", object->count);
        pr_notice("  flags = 0x%lx\n", object->flags);
        pr_notice("  checksum = %u\n", object->checksum);
        pr_notice("  backtrace:\n");
        print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
        struct rb_node *rb = object_tree_root.rb_node;

        while (rb) {
                struct kmemleak_object *object =
                        rb_entry(rb, struct kmemleak_object, rb_node);
                if (ptr < object->pointer)
                        rb = object->rb_node.rb_left;
                else if (object->pointer + object->size <= ptr)
                        rb = object->rb_node.rb_right;
                else if (object->pointer == ptr || alias)
                        return object;
                else {
                        kmemleak_warn("Found object by alias at 0x%08lx\n",
                                      ptr);
                        dump_object_info(object);
                        break;
                }
        }
        return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
        return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
        struct hlist_node *tmp;
        struct kmemleak_scan_area *area;
        struct kmemleak_object *object =
                container_of(rcu, struct kmemleak_object, rcu);

        /*
         * Once use_count is 0 (guaranteed by put_object), there is no other
         * code accessing this object, hence no need for locking.
         */
        hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
                hlist_del(&area->node);
                kmem_cache_free(scan_area_cache, area);
        }
        kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
        if (!atomic_dec_and_test(&object->use_count))
                return;

        /* should only get here after delete_object was called */
        WARN_ON(object->flags & OBJECT_ALLOCATED);

        call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
        unsigned long flags;
        struct kmemleak_object *object;

        rcu_read_lock();
        read_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
        read_unlock_irqrestore(&kmemleak_lock, flags);

        /* check whether the object is still available */
        if (object && !get_object(object))
                object = NULL;
        rcu_read_unlock();

        return object;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
        unsigned long flags;
        struct kmemleak_object *object;

        write_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
        if (object) {
                rb_erase(&object->rb_node, &object_tree_root);
                list_del_rcu(&object->object_list);
        }
        write_unlock_irqrestore(&kmemleak_lock, flags);

        return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
        struct stack_trace stack_trace;

        stack_trace.max_entries = MAX_TRACE;
        stack_trace.nr_entries = 0;
        stack_trace.entries = trace;
        stack_trace.skip = 2;
        save_stack_trace(&stack_trace);

        return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
                                             int min_count, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object, *parent;
        struct rb_node **link, *rb_parent;

        object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
        if (!object) {
                pr_warn("Cannot allocate a kmemleak_object structure\n");
                kmemleak_disable();
                return NULL;
        }

        INIT_LIST_HEAD(&object->object_list);
        INIT_LIST_HEAD(&object->gray_list);
        INIT_HLIST_HEAD(&object->area_list);
        spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
        object->flags = OBJECT_ALLOCATED;
        object->pointer = ptr;
        object->size = size;
        object->min_count = min_count;
        object->count = 0;                      /* white color initially */
        object->jiffies = jiffies;
        object->checksum = 0;

        /* task information */
        if (in_irq()) {
                object->pid = 0;
                strncpy(object->comm, "hardirq", sizeof(object->comm));
        } else if (in_softirq()) {
                object->pid = 0;
                strncpy(object->comm, "softirq", sizeof(object->comm));
        } else {
                object->pid = current->pid;
                /*
                 * There is a small chance of a race with set_task_comm(),
                 * however using get_task_comm() here may cause locking
                 * dependency issues with current->alloc_lock. In the worst
                 * case, the command line is not correct.
                 */
                strncpy(object->comm, current->comm, sizeof(object->comm));
        }

        /* kernel backtrace */
        object->trace_len = __save_stack_trace(object->trace);

        write_lock_irqsave(&kmemleak_lock, flags);

        min_addr = min(min_addr, ptr);
        max_addr = max(max_addr, ptr + size);
        link = &object_tree_root.rb_node;
        rb_parent = NULL;
        while (*link) {
                rb_parent = *link;
                parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
                if (ptr + size <= parent->pointer)
                        link = &parent->rb_node.rb_left;
                else if (parent->pointer + parent->size <= ptr)
                        link = &parent->rb_node.rb_right;
                else {
                        kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
                                      ptr);
                        /*
                         * No need for parent->lock here since "parent" cannot
                         * be freed while the kmemleak_lock is held.
                         */
                        dump_object_info(parent);
                        kmem_cache_free(object_cache, object);
                        object = NULL;
                        goto out;
                }
        }
        rb_link_node(&object->rb_node, rb_parent, link);
        rb_insert_color(&object->rb_node, &object_tree_root);

        list_add_tail_rcu(&object->object_list, &object_list);
out:
        write_unlock_irqrestore(&kmemleak_lock, flags);
        return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
        unsigned long flags;

        WARN_ON(!(object->flags & OBJECT_ALLOCATED));
        WARN_ON(atomic_read(&object->use_count) < 1);

        /*
         * Locking here also ensures that the corresponding memory block
         * cannot be freed when it is being scanned.
         */
        spin_lock_irqsave(&object->lock, flags);
        object->flags &= ~OBJECT_ALLOCATED;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
        struct kmemleak_object *object;

        object = find_and_remove_object(ptr, 0);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
                              ptr);
#endif
                return;
        }
        __delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
        struct kmemleak_object *object;
        unsigned long start, end;

        object = find_and_remove_object(ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
                              ptr, size);
#endif
                return;
        }

        /*
         * Create one or two objects that may result from the memory block
         * split. Note that partial freeing is only done by free_bootmem() and
         * this happens before kmemleak_init() is called. The path below is
         * only executed during early log recording in kmemleak_init(), so
         * GFP_KERNEL is enough.
         */
        start = object->pointer;
        end = object->pointer + object->size;
        if (ptr > start)
                create_object(start, ptr - start, object->min_count,
                              GFP_KERNEL);
        if (ptr + size < end)
                create_object(ptr + size, end - ptr - size, object->min_count,
                              GFP_KERNEL);

        __delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
        object->min_count = color;
        if (color == KMEMLEAK_BLACK)
                object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
        unsigned long flags;

        spin_lock_irqsave(&object->lock, flags);
        __paint_it(object, color);
        spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
                              ptr,
                              (color == KMEMLEAK_GREY) ? "Grey" :
                              (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
                return;
        }
        paint_it(object, color);
        put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area;

        object = find_and_get_object(ptr, 1);
        if (!object) {
                kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
        if (!area) {
                pr_warn("Cannot allocate a scan area\n");
                goto out;
        }

        spin_lock_irqsave(&object->lock, flags);
        if (size == SIZE_MAX) {
                size = object->pointer + object->size - ptr;
        } else if (ptr + size > object->pointer + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
                goto out_unlock;
        }

        INIT_HLIST_NODE(&area->node);
        area->start = ptr;
        area->size = size;

        hlist_add_head(&area->node, &object->area_list);
out_unlock:
        spin_unlock_irqrestore(&object->lock, flags);
out:
        put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->flags |= OBJECT_NO_SCAN;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
                             int min_count)
{
        unsigned long flags;
        struct early_log *log;

        if (kmemleak_error) {
                /* kmemleak stopped recording, just count the requests */
                crt_early_log++;
                return;
        }

        if (crt_early_log >= ARRAY_SIZE(early_log)) {
                crt_early_log++;
                kmemleak_disable();
                return;
        }

        /*
         * There is no need for locking since the kernel is still in UP mode
         * at this stage. Disabling the IRQs is enough.
         */
        local_irq_save(flags);
        log = &early_log[crt_early_log];
        log->op_type = op_type;
        log->ptr = ptr;
        log->size = size;
        log->min_count = min_count;
        log->trace_len = __save_stack_trace(log->trace);
        crt_early_log++;
        local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
        struct kmemleak_object *object;
        unsigned long flags;
        int i;

        if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
                return;

        /*
         * RCU locking needed to ensure object is not freed via put_object().
         */
        rcu_read_lock();
        object = create_object((unsigned long)log->ptr, log->size,
                               log->min_count, GFP_ATOMIC);
        if (!object)
                goto out;
        spin_lock_irqsave(&object->lock, flags);
        for (i = 0; i < log->trace_len; i++)
                object->trace[i] = log->trace[i];
        object->trace_len = log->trace_len;
        spin_unlock_irqrestore(&object->lock, flags);
out:
        rcu_read_unlock();
}

/*
 * Log an early allocated percpu block and populate the stack trace.
 */
static void early_alloc_percpu(struct early_log *log)
{
        unsigned int cpu;
        const void __percpu *ptr = log->ptr;

        for_each_possible_cpu(cpu) {
                log->ptr = per_cpu_ptr(ptr, cpu);
                early_alloc(log);
        }
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:        pointer to beginning of the object
 * @size:       size of the object
 * @min_count:  minimum number of references to this object. If during memory
 *              scanning a number of references less than @min_count is found,
 *              the object is reported as a memory leak. If @min_count is 0,
 *              the object is never reported as a leak. If @min_count is -1,
 *              the object is ignored (not scanned and not reported as a leak)
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
                          gfp_t gfp)
{
        pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                create_object((unsigned long)ptr, size, min_count, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
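
/*
 * Illustrative call site (a sketch, not code from this file): an allocator
 * registering a block that must keep at least one live reference would do
 * something like
 *
 *      kmemleak_alloc(ptr, size, 1, gfp);
 *
 * after which the block is reported as a leak if a scan finds no pointer
 * to it.
 */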

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 * @size:       size of the object
 * @gfp:        flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
                                 gfp_t gfp)
{
        unsigned int cpu;

        pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

        /*
         * Percpu allocations are only scanned and not reported as leaks
         * (min_count is set to 0).
         */
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        create_object((unsigned long)per_cpu_ptr(ptr, cpu),
                                      size, 0, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:        pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:        pointer to the beginning or inside the object. This also
 *              represents the start of the range to be freed
 * @size:       size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                delete_object_part((unsigned long)ptr, size);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
        unsigned int cpu;

        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        delete_object_full((unsigned long)per_cpu_ptr(ptr,
                                                                      cpu));
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:        pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
        struct kmemleak_object *object;
        unsigned long flags;

        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
                return;

        object = find_and_get_object((unsigned long)ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Updating stack trace for unknown object at %p\n",
                              ptr);
#endif
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->trace_len = __save_stack_trace(object->trace);
        spin_unlock_irqrestore(&object->lock, flags);

        put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_gray_object((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
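
/*
 * Illustrative usage (a sketch with hypothetical names, not code from this
 * file): a driver whose only copy of a pointer ends up somewhere kmemleak
 * cannot see, e.g. a device register, can suppress the false positive:
 *
 *      priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 *      if (!priv)
 *              return -ENOMEM;
 *      kmemleak_not_leak(priv);
 */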

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_black_object((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
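
/*
 * Illustrative usage (a sketch with hypothetical names): a block kept for
 * the lifetime of the system and containing no pointers to other
 * allocations can be taken out of both scanning and reporting:
 *
 *      table = kmalloc(TABLE_SIZE, GFP_KERNEL);
 *      kmemleak_ignore(table);
 */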

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:        pointer to beginning or inside the object. This also
 *              represents the start of the scan area
 * @size:       size of the scan area
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
                add_scan_area((unsigned long)ptr, size, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
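
/*
 * Illustrative usage (a sketch with hypothetical field names): if only the
 * embedded pointer array of a larger structure can reference other
 * allocations, restricting scanning to that range avoids random data in
 * the rest of the block keeping leaked objects alive:
 *
 *      obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *      kmemleak_scan_area(&obj->ptrs, sizeof(obj->ptrs), GFP_KERNEL);
 */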

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                object_no_scan((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
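
/*
 * Illustrative usage (a sketch with hypothetical names): a large data-only
 * buffer with no kernel pointers in it stays leak-checked but is skipped
 * by the scanner:
 *
 *      buf = kmalloc(BUF_SIZE, GFP_KERNEL);
 *      kmemleak_no_scan(buf);
 */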

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *                       address argument
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
                               gfp_t gfp)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *                           physical address argument
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *                          address argument
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *                        address argument
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
        u32 old_csum = object->checksum;

        if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
                return false;

        kasan_disable_current();
        object->checksum = crc32(0, (void *)object->pointer, object->size);
        kasan_enable_current();

        return object->checksum != old_csum;
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
        if (!kmemleak_enabled)
                return 1;

        /*
         * This function may be called from either process or kthread context,
         * hence the need to check for both stop conditions.
         */
        if (current->mm)
                return signal_pending(current);
        else
                return kthread_should_stop();

        return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
                       struct kmemleak_object *scanned)
{
        unsigned long *ptr;
        unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
        unsigned long *end = _end - (BYTES_PER_POINTER - 1);
        unsigned long flags;

        read_lock_irqsave(&kmemleak_lock, flags);
        for (ptr = start; ptr < end; ptr++) {
                struct kmemleak_object *object;
                unsigned long pointer;

                if (scan_should_stop())
                        break;

                /* don't scan uninitialized memory */
                if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
                                                  BYTES_PER_POINTER))
                        continue;

                kasan_disable_current();
                pointer = *ptr;
                kasan_enable_current();

                if (pointer < min_addr || pointer >= max_addr)
                        continue;

                /*
                 * No need for get_object() here since we hold kmemleak_lock.
                 * object->use_count cannot be dropped to 0 while the object
                 * is still present in object_tree_root and object_list
                 * (with updates protected by kmemleak_lock).
                 */
                object = lookup_object(pointer, 1);
                if (!object)
                        continue;
                if (object == scanned)
                        /* self referenced, ignore */
                        continue;

                /*
                 * Avoid the lockdep recursive warning on object->lock being
                 * previously acquired in scan_object(). These locks are
                 * enclosed by scan_mutex.
                 */
                spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                if (!color_white(object)) {
                        /* non-orphan, ignored or new */
                        spin_unlock(&object->lock);
                        continue;
                }

                /*
                 * Increase the object's reference count (number of pointers
                 * to the memory block). If this count reaches the required
                 * minimum, the object's color will become gray and it will be
                 * added to the gray_list.
                 */
                object->count++;
                if (color_gray(object)) {
                        /* put_object() called when removing from gray_list */
                        WARN_ON(!get_object(object));
                        list_add_tail(&object->gray_list, &gray_list);
                }
                spin_unlock(&object->lock);
        }
        read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
static void scan_large_block(void *start, void *end)
{
        void *next;

        while (start < end) {
                next = min(start + MAX_SCAN_SIZE, end);
                scan_block(start, next, NULL);
                start = next;
                cond_resched();
        }
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
        struct kmemleak_scan_area *area;
        unsigned long flags;

        /*
         * Once the object->lock is acquired, the corresponding memory block
         * cannot be freed (the same lock is acquired in delete_object).
         */
        spin_lock_irqsave(&object->lock, flags);
        if (object->flags & OBJECT_NO_SCAN)
                goto out;
        if (!(object->flags & OBJECT_ALLOCATED))
                /* already freed object */
                goto out;
        if (hlist_empty(&object->area_list)) {
                void *start = (void *)object->pointer;
                void *end = (void *)(object->pointer + object->size);
                void *next;

                do {
                        next = min(start + MAX_SCAN_SIZE, end);
                        scan_block(start, next, object);

                        start = next;
                        if (start >= end)
                                break;

                        spin_unlock_irqrestore(&object->lock, flags);
                        cond_resched();
                        spin_lock_irqsave(&object->lock, flags);
                } while (object->flags & OBJECT_ALLOCATED);
        } else
                hlist_for_each_entry(area, &object->area_list, node)
                        scan_block((void *)area->start,
                                   (void *)(area->start + area->size),
                                   object);
out:
        spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
        struct kmemleak_object *object, *tmp;

        /*
         * The list traversal is safe for both tail additions and removals
         * from inside the loop. The kmemleak objects cannot be freed from
         * outside the loop because their use_count was incremented.
         */
        object = list_entry(gray_list.next, typeof(*object), gray_list);
        while (&object->gray_list != &gray_list) {
                cond_resched();

                /* may add new objects to the list */
                if (!scan_should_stop())
                        scan_object(object);

                tmp = list_entry(object->gray_list.next, typeof(*object),
                                 gray_list);

                /* remove the object from the list and release it */
                list_del(&object->gray_list);
                put_object(object);

                object = tmp;
        }
        WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
        unsigned long flags;
        struct kmemleak_object *object;
        int i;
        int new_leaks = 0;

        jiffies_last_scan = jiffies;

        /* prepare the kmemleak_object structures */
1391        rcu_read_lock();
1392        list_for_each_entry_rcu(object, &object_list, object_list) {
1393                spin_lock_irqsave(&object->lock, flags);
1394#ifdef DEBUG
1395                /*
1396                 * With a few exceptions there should be a maximum of
1397                 * 1 reference to any object at this point.
1398                 */
1399                if (atomic_read(&object->use_count) > 1) {
1400                        pr_debug("object->use_count = %d\n",
1401                                 atomic_read(&object->use_count));
1402                        dump_object_info(object);
1403                }
1404#endif
1405                /* reset the reference count (whiten the object) */
1406                object->count = 0;
1407                if (color_gray(object) && get_object(object))
1408                        list_add_tail(&object->gray_list, &gray_list);
1409
1410                spin_unlock_irqrestore(&object->lock, flags);
1411        }
1412        rcu_read_unlock();
1413
1414        /* data/bss scanning */
1415        scan_large_block(_sdata, _edata);
1416        scan_large_block(__bss_start, __bss_stop);
1417        scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init);
1418
1419#ifdef CONFIG_SMP
1420        /* per-cpu sections scanning */
1421        for_each_possible_cpu(i)
1422                scan_large_block(__per_cpu_start + per_cpu_offset(i),
1423                                 __per_cpu_end + per_cpu_offset(i));
1424#endif
1425
1426        /*
1427         * Struct page scanning for each node.
1428         */
1429        get_online_mems();
1430        for_each_online_node(i) {
1431                unsigned long start_pfn = node_start_pfn(i);
1432                unsigned long end_pfn = node_end_pfn(i);
1433                unsigned long pfn;
1434
1435                for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1436                        struct page *page;
1437
1438                        if (!pfn_valid(pfn))
1439                                continue;
1440                        page = pfn_to_page(pfn);
1441                        /* only scan if page is in use */
1442                        if (page_count(page) == 0)
1443                                continue;
1444                        scan_block(page, page + 1, NULL);
1445                }
1446        }
1447        put_online_mems();
1448
1449        /*
1450         * Scanning the task stacks (may introduce false negatives).
1451         */
1452        if (kmemleak_stack_scan) {
1453                struct task_struct *p, *g;
1454
1455                read_lock(&tasklist_lock);
1456                do_each_thread(g, p) {
1457                        void *stack = try_get_task_stack(p);
1458                        if (stack) {
1459                                scan_block(stack, stack + THREAD_SIZE, NULL);
1460                                put_task_stack(p);
1461                        }
1462                } while_each_thread(g, p);
1463                read_unlock(&tasklist_lock);
1464        }
1465
1466        /*
1467         * Scan the objects already referenced from the sections scanned
1468         * above.
1469         */
1470        scan_gray_list();
1471
1472        /*
1473         * Check for new or unreferenced objects modified since the previous
1474         * scan and color them gray until the next scan.
1475         */
1476        rcu_read_lock();
1477        list_for_each_entry_rcu(object, &object_list, object_list) {
1478                spin_lock_irqsave(&object->lock, flags);
1479                if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1480                    && update_checksum(object) && get_object(object)) {
1481                        /* color it gray temporarily */
1482                        object->count = object->min_count;
1483                        list_add_tail(&object->gray_list, &gray_list);
1484                }
1485                spin_unlock_irqrestore(&object->lock, flags);
1486        }
1487        rcu_read_unlock();
1488
1489        /*
1490         * Re-scan the gray list for modified unreferenced objects.
1491         */
1492        scan_gray_list();
1493
1494        /*
1495         * If scanning was stopped do not report any new unreferenced objects.
1496         */
1497        if (scan_should_stop())
1498                return;
1499
1500        /*
1501         * Scanning result reporting.
1502         */
1503        rcu_read_lock();
1504        list_for_each_entry_rcu(object, &object_list, object_list) {
1505                spin_lock_irqsave(&object->lock, flags);
1506                if (unreferenced_object(object) &&
1507                    !(object->flags & OBJECT_REPORTED)) {
1508                        object->flags |= OBJECT_REPORTED;
1509                        new_leaks++;
1510                }
1511                spin_unlock_irqrestore(&object->lock, flags);
1512        }
1513        rcu_read_unlock();
1514
1515        if (new_leaks) {
1516                kmemleak_found_leaks = true;
1517
1518                pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1519                        new_leaks);
1520        }
1521
1522}
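
/*
 * Note on the scan above: all objects are first whitened (count = 0) and the
 * root areas (data/bss, per-cpu sections, struct pages and, optionally, the
 * task stacks) are scanned to seed the gray_list; scan_gray_list() then
 * follows references transitively. The second object_list pass only re-grays
 * white objects whose checksum changed since the last scan, so objects still
 * being modified are not reported prematurely.
 */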
1523
1524/*
1525 * Thread function performing automatic memory scanning. Unreferenced objects
1526 * found at the end of a memory scan are reported, but only the first time.
1527 */
1528static int kmemleak_scan_thread(void *arg)
1529{
1530        static int first_run = 1;
1531
1532        pr_info("Automatic memory scanning thread started\n");
1533        set_user_nice(current, 10);
1534
1535        /*
1536         * Wait before the first scan to allow the system to fully initialize.
1537         */
1538        if (first_run) {
1539                signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1540                first_run = 0;
1541                while (timeout && !kthread_should_stop())
1542                        timeout = schedule_timeout_interruptible(timeout);
1543        }
1544
1545        while (!kthread_should_stop()) {
1546                signed long timeout = jiffies_scan_wait;
1547
1548                mutex_lock(&scan_mutex);
1549                kmemleak_scan();
1550                mutex_unlock(&scan_mutex);
1551
1552                /* wait before the next scan */
1553                while (timeout && !kthread_should_stop())
1554                        timeout = schedule_timeout_interruptible(timeout);
1555        }
1556
1557        pr_info("Automatic memory scanning thread ended\n");
1558
1559        return 0;
1560}
1561
1562/*
1563 * Start the automatic memory scanning thread. This function must be called
1564 * with the scan_mutex held.
1565 */
1566static void start_scan_thread(void)
1567{
1568        if (scan_thread)
1569                return;
1570        scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1571        if (IS_ERR(scan_thread)) {
1572                pr_warn("Failed to create the scan thread\n");
1573                scan_thread = NULL;
1574        }
1575}
1576
1577/*
1578 * Stop the automatic memory scanning thread. This function must be called
1579 * with the scan_mutex held.
1580 */
1581static void stop_scan_thread(void)
1582{
1583        if (scan_thread) {
1584                kthread_stop(scan_thread);
1585                scan_thread = NULL;
1586        }
1587}
1588
1589/*
1590 * Iterate over the object_list and return the first valid object at or after
1591 * the required position with its use_count incremented. The scan_mutex is
1592 * acquired here and only released in kmemleak_seq_stop().
1593 */
1594static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1595{
1596        struct kmemleak_object *object;
1597        loff_t n = *pos;
1598        int err;
1599
1600        err = mutex_lock_interruptible(&scan_mutex);
1601        if (err < 0)
1602                return ERR_PTR(err);
1603
1604        rcu_read_lock();
1605        list_for_each_entry_rcu(object, &object_list, object_list) {
1606                if (n-- > 0)
1607                        continue;
1608                if (get_object(object))
1609                        goto out;
1610        }
1611        object = NULL;
1612out:
1613        return object;
1614}
1615
1616/*
1617 * Return the next object in the object_list. The function decrements the
1618 * use_count of the previous object and increases that of the next one.
1619 */
1620static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1621{
1622        struct kmemleak_object *prev_obj = v;
1623        struct kmemleak_object *next_obj = NULL;
1624        struct kmemleak_object *obj = prev_obj;
1625
1626        ++(*pos);
1627
1628        list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1629                if (get_object(obj)) {
1630                        next_obj = obj;
1631                        break;
1632                }
1633        }
1634
1635        put_object(prev_obj);
1636        return next_obj;
1637}
1638
1639/*
1640 * Decrement the use_count of the last object returned, if any.
1641 */
1642static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1643{
1644        if (!IS_ERR(v)) {
1645                /*
1646                 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1647                 * waiting was interrupted, so only release it if !IS_ERR.
1648                 */
1649                rcu_read_unlock();
1650                mutex_unlock(&scan_mutex);
1651                if (v)
1652                        put_object(v);
1653        }
1654}
1655
1656/*
1657 * Print the information for an unreferenced object to the seq file.
1658 */
1659static int kmemleak_seq_show(struct seq_file *seq, void *v)
1660{
1661        struct kmemleak_object *object = v;
1662        unsigned long flags;
1663
1664        spin_lock_irqsave(&object->lock, flags);
1665        if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1666                print_unreferenced(seq, object);
1667        spin_unlock_irqrestore(&object->lock, flags);
1668        return 0;
1669}
1670
1671static const struct seq_operations kmemleak_seq_ops = {
1672        .start = kmemleak_seq_start,
1673        .next  = kmemleak_seq_next,
1674        .stop  = kmemleak_seq_stop,
1675        .show  = kmemleak_seq_show,
1676};
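
/*
 * The seq interface above holds the scan_mutex, taken in kmemleak_seq_start()
 * and released in kmemleak_seq_stop(), for the whole traversal, so a memory
 * scan cannot run while the file is being read; each object is additionally
 * pinned via its use_count while it is current.
 */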
1677
1678static int kmemleak_open(struct inode *inode, struct file *file)
1679{
1680        return seq_open(file, &kmemleak_seq_ops);
1681}
1682
1683static int dump_str_object_info(const char *str)
1684{
1685        unsigned long flags;
1686        struct kmemleak_object *object;
1687        unsigned long addr;
1688
1689        if (kstrtoul(str, 0, &addr))
1690                return -EINVAL;
1691        object = find_and_get_object(addr, 0);
1692        if (!object) {
1693                pr_info("Unknown object at 0x%08lx\n", addr);
1694                return -EINVAL;
1695        }
1696
1697        spin_lock_irqsave(&object->lock, flags);
1698        dump_object_info(object);
1699        spin_unlock_irqrestore(&object->lock, flags);
1700
1701        put_object(object);
1702        return 0;
1703}
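
/*
 * Example (illustrative; the address is hypothetical and debugfs is assumed
 * to be mounted at /sys/kernel/debug):
 *
 *        echo dump=0xffff8800b8e20000 > /sys/kernel/debug/kmemleak
 *
 * prints the metadata of the object containing that address via
 * dump_object_info().
 */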
1704
1705/*
1706 * We use grey instead of black to ensure we can do future scans on the same
1707 * objects. If they were painted black they would never be scanned again,
1708 * so references they later hold to newly allocated objects would be missed
1709 * and those objects would be falsely reported as leaks.
1710 */
1711static void kmemleak_clear(void)
1712{
1713        struct kmemleak_object *object;
1714        unsigned long flags;
1715
1716        rcu_read_lock();
1717        list_for_each_entry_rcu(object, &object_list, object_list) {
1718                spin_lock_irqsave(&object->lock, flags);
1719                if ((object->flags & OBJECT_REPORTED) &&
1720                    unreferenced_object(object))
1721                        __paint_it(object, KMEMLEAK_GREY);
1722                spin_unlock_irqrestore(&object->lock, flags);
1723        }
1724        rcu_read_unlock();
1725
1726        kmemleak_found_leaks = false;
1727}
1728
1729static void __kmemleak_do_cleanup(void);
1730
1731/*
1732 * File write operation to configure kmemleak at run-time. The following
1733 * commands can be written to the /sys/kernel/debug/kmemleak file:
1734 *   off        - disable kmemleak (irreversible)
1735 *   stack=on   - enable the task stacks scanning
1736 *   stack=off  - disable the task stacks scanning
1737 *   scan=on    - start the automatic memory scanning thread
1738 *   scan=off   - stop the automatic memory scanning thread
1739 *   scan=...   - set the automatic memory scanning period in seconds (0 to
1740 *                disable it)
1741 *   scan       - trigger a memory scan
1742 *   clear      - mark all currently reported unreferenced kmemleak objects
1743 *                as grey so that they are no longer printed, or free all
1744 *                kmemleak objects if kmemleak has been disabled
1745 *   dump=...   - dump information about the object found at the given address
1746 */
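/*
 * For example (illustrative usage; assumes debugfs is mounted at
 * /sys/kernel/debug):
 *
 *        echo scan > /sys/kernel/debug/kmemleak        # trigger a scan now
 *        echo scan=600 > /sys/kernel/debug/kmemleak    # rescan every 600s
 *        echo scan=0 > /sys/kernel/debug/kmemleak      # stop periodic scans
 *        echo clear > /sys/kernel/debug/kmemleak       # grey current reports
 *        echo off > /sys/kernel/debug/kmemleak         # disable (irreversible)
 */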
1747static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1748                              size_t size, loff_t *ppos)
1749{
1750        char buf[64];
1751        int buf_size;
1752        int ret;
1753
1754        buf_size = min(size, (sizeof(buf) - 1));
1755        if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1756                return -EFAULT;
1757        buf[buf_size] = 0;
1758
1759        ret = mutex_lock_interruptible(&scan_mutex);
1760        if (ret < 0)
1761                return ret;
1762
1763        if (strncmp(buf, "clear", 5) == 0) {
1764                if (kmemleak_enabled)
1765                        kmemleak_clear();
1766                else
1767                        __kmemleak_do_cleanup();
1768                goto out;
1769        }
1770
1771        if (!kmemleak_enabled) {
1772                ret = -EBUSY;
1773                goto out;
1774        }
1775
1776        if (strncmp(buf, "off", 3) == 0)
1777                kmemleak_disable();
1778        else if (strncmp(buf, "stack=on", 8) == 0)
1779                kmemleak_stack_scan = 1;
1780        else if (strncmp(buf, "stack=off", 9) == 0)
1781                kmemleak_stack_scan = 0;
1782        else if (strncmp(buf, "scan=on", 7) == 0)
1783                start_scan_thread();
1784        else if (strncmp(buf, "scan=off", 8) == 0)
1785                stop_scan_thread();
1786        else if (strncmp(buf, "scan=", 5) == 0) {
1787                unsigned long secs;
1788
1789                ret = kstrtoul(buf + 5, 0, &secs);
1790                if (ret < 0)
1791                        goto out;
1792                stop_scan_thread();
1793                if (secs) {
1794                        jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1795                        start_scan_thread();
1796                }
1797        } else if (strncmp(buf, "scan", 4) == 0)
1798                kmemleak_scan();
1799        else if (strncmp(buf, "dump=", 5) == 0)
1800                ret = dump_str_object_info(buf + 5);
1801        else
1802                ret = -EINVAL;
1803
1804out:
1805        mutex_unlock(&scan_mutex);
1806        if (ret < 0)
1807                return ret;
1808
1809        /* ignore the rest of the buffer, only one command at a time */
1810        *ppos += size;
1811        return size;
1812}
1813
1814static const struct file_operations kmemleak_fops = {
1815        .owner          = THIS_MODULE,
1816        .open           = kmemleak_open,
1817        .read           = seq_read,
1818        .write          = kmemleak_write,
1819        .llseek         = seq_lseek,
1820        .release        = seq_release,
1821};
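
/*
 * Reading the same debugfs file lists the objects currently reported as
 * unreferenced, e.g. (illustrative):
 *
 *        cat /sys/kernel/debug/kmemleak
 *
 * with each entry printed by kmemleak_seq_show() via print_unreferenced().
 */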
1822
1823static void __kmemleak_do_cleanup(void)
1824{
1825        struct kmemleak_object *object;
1826
1827        rcu_read_lock();
1828        list_for_each_entry_rcu(object, &object_list, object_list)
1829                delete_object_full(object->pointer);
1830        rcu_read_unlock();
1831}
1832
1833/*
1834 * Stop the memory scanning thread and free the kmemleak internal objects if
1835 * no memory leaks have been found (otherwise, kmemleak may still hold
1836 * useful information on the leaks already reported).
1837 */
1838static void kmemleak_do_cleanup(struct work_struct *work)
1839{
1840        stop_scan_thread();
1841
1842        /*
1843         * Once the scan thread has stopped, it is safe to no longer track
1844         * object freeing. Ordering of the scan thread stopping and the memory
1845         * accesses below is guaranteed by the kthread_stop() function.
1846         */
1847        kmemleak_free_enabled = 0;
1848
1849        if (!kmemleak_found_leaks)
1850                __kmemleak_do_cleanup();
1851        else
1852                pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
1853}
1854
1855static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1856
1857/*
1858 * Disable kmemleak. No memory allocation/freeing will be traced once this
1859 * function is called. Disabling kmemleak is an irreversible operation.
1860 */
1861static void kmemleak_disable(void)
1862{
1863        /* atomically check whether it was already invoked */
1864        if (cmpxchg(&kmemleak_error, 0, 1))
1865                return;
1866
1867        /* stop any memory operation tracing */
1868        kmemleak_enabled = 0;
1869
1870        /* check whether it is too early for a kernel thread */
1871        if (kmemleak_initialized)
1872                schedule_work(&cleanup_work);
1873        else
1874                kmemleak_free_enabled = 0;
1875
1876        pr_info("Kernel memory leak detector disabled\n");
1877}
1878
1879/*
1880 * Allow boot-time kmemleak disabling (enabled by default).
1881 */
1882static int kmemleak_boot_config(char *str)
1883{
1884        if (!str)
1885                return -EINVAL;
1886        if (strcmp(str, "off") == 0)
1887                kmemleak_disable();
1888        else if (strcmp(str, "on") == 0)
1889                kmemleak_skip_disable = 1;
1890        else
1891                return -EINVAL;
1892        return 0;
1893}
1894early_param("kmemleak", kmemleak_boot_config);
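
/*
 * For example, booting with "kmemleak=off" on the kernel command line
 * disables kmemleak entirely, while "kmemleak=on" keeps it enabled even if
 * CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is set (see kmemleak_init() below).
 */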
1895
1896static void __init print_log_trace(struct early_log *log)
1897{
1898        struct stack_trace trace;
1899
1900        trace.nr_entries = log->trace_len;
1901        trace.entries = log->trace;
1902
1903        pr_notice("Early log backtrace:\n");
1904        print_stack_trace(&trace, 2);
1905}
1906
1907/*
1908 * Kmemleak initialization.
1909 */
1910void __init kmemleak_init(void)
1911{
1912        int i;
1913        unsigned long flags;
1914
1915#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1916        if (!kmemleak_skip_disable) {
1917                kmemleak_early_log = 0;
1918                kmemleak_disable();
1919                return;
1920        }
1921#endif
1922
1923        jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1924        jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1925
1926        object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1927        scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1928
1929        if (crt_early_log > ARRAY_SIZE(early_log))
1930                pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
1931                        crt_early_log);
1932
1933        /* the kernel is still in UP mode, so disabling the IRQs is enough */
1934        local_irq_save(flags);
1935        kmemleak_early_log = 0;
1936        if (kmemleak_error) {
1937                local_irq_restore(flags);
1938                return;
1939        } else {
1940                kmemleak_enabled = 1;
1941                kmemleak_free_enabled = 1;
1942        }
1943        local_irq_restore(flags);
1944
1945        /*
1946         * This is the point where tracking allocations is safe. Automatic
1947         * scanning is started during the late initcall. Add the early logged
1948         * callbacks to the kmemleak infrastructure.
1949         */
1950        for (i = 0; i < crt_early_log; i++) {
1951                struct early_log *log = &early_log[i];
1952
1953                switch (log->op_type) {
1954                case KMEMLEAK_ALLOC:
1955                        early_alloc(log);
1956                        break;
1957                case KMEMLEAK_ALLOC_PERCPU:
1958                        early_alloc_percpu(log);
1959                        break;
1960                case KMEMLEAK_FREE:
1961                        kmemleak_free(log->ptr);
1962                        break;
1963                case KMEMLEAK_FREE_PART:
1964                        kmemleak_free_part(log->ptr, log->size);
1965                        break;
1966                case KMEMLEAK_FREE_PERCPU:
1967                        kmemleak_free_percpu(log->ptr);
1968                        break;
1969                case KMEMLEAK_NOT_LEAK:
1970                        kmemleak_not_leak(log->ptr);
1971                        break;
1972                case KMEMLEAK_IGNORE:
1973                        kmemleak_ignore(log->ptr);
1974                        break;
1975                case KMEMLEAK_SCAN_AREA:
1976                        kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
1977                        break;
1978                case KMEMLEAK_NO_SCAN:
1979                        kmemleak_no_scan(log->ptr);
1980                        break;
1981                default:
1982                        kmemleak_warn("Unknown early log operation: %d\n",
1983                                      log->op_type);
1984                }
1985
1986                if (kmemleak_warning) {
1987                        print_log_trace(log);
1988                        kmemleak_warning = 0;
1989                }
1990        }
1991}
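
/*
 * Illustrative client-side usage of the annotations replayed above (a
 * sketch, not part of this file): a driver whose allocation is referenced
 * only from memory that kmemleak does not scan can annotate it to avoid a
 * false positive, e.g.
 *
 *        #include <linux/kmemleak.h>
 *
 *        buf = kmalloc(size, GFP_KERNEL);
 *        if (!buf)
 *                return -ENOMEM;
 *        kmemleak_not_leak(buf);    -- never report buf as a leak
 *        kmemleak_no_scan(buf);     -- do not scan buf for pointers
 */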
1992
1993/*
1994 * Late initialization function.
1995 */
1996static int __init kmemleak_late_init(void)
1997{
1998        struct dentry *dentry;
1999
2000        kmemleak_initialized = 1;
2001
2002        if (kmemleak_error) {
2003                /*
2004                 * Some error occurred and kmemleak was disabled. There is a
2005                 * small chance that kmemleak_disable() was called immediately
2006                 * after setting kmemleak_initialized and we may end up with
2007                 * two clean-up threads but serialized by scan_mutex.
2008                 */
2009                schedule_work(&cleanup_work);
2010                return -ENOMEM;
2011        }
2012
2013        dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
2014                                     &kmemleak_fops);
2015        if (!dentry)
2016                pr_warn("Failed to create the debugfs kmemleak file\n");
2017        mutex_lock(&scan_mutex);
2018        start_scan_thread();
2019        mutex_unlock(&scan_mutex);
2020
2021        pr_info("Kernel memory leak detector initialized\n");
2022
2023        return 0;
2024}
2025late_initcall(kmemleak_late_init);
2026