linux/mm/kmemleak.c
   1/*
   2 * mm/kmemleak.c
   3 *
   4 * Copyright (C) 2008 ARM Limited
   5 * Written by Catalin Marinas <catalin.marinas@arm.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19 *
  20 *
  21 * For more information on the algorithm and kmemleak usage, please see
  22 * Documentation/kmemleak.txt.
  23 *
  24 * Notes on locking
  25 * ----------------
  26 *
  27 * The following locks and mutexes are used by kmemleak:
  28 *
  29 * - kmemleak_lock (rwlock): protects the object_list modifications and
  30 *   accesses to the object_tree_root. The object_list is the main list
  31 *   holding the metadata (struct kmemleak_object) for the allocated memory
   32 *   blocks. The object_tree_root is a priority search tree used to look up
  33 *   metadata based on a pointer to the corresponding memory block.  The
  34 *   kmemleak_object structures are added to the object_list and
  35 *   object_tree_root in the create_object() function called from the
  36 *   kmemleak_alloc() callback and removed in delete_object() called from the
  37 *   kmemleak_free() callback
  38 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
  39 *   the metadata (e.g. count) are protected by this lock. Note that some
  40 *   members of this structure may be protected by other means (atomic or
  41 *   kmemleak_lock). This lock is also held when scanning the corresponding
  42 *   memory block to avoid the kernel freeing it via the kmemleak_free()
  43 *   callback. This is less heavyweight than holding a global lock like
  44 *   kmemleak_lock during scanning
  45 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
  46 *   unreferenced objects at a time. The gray_list contains the objects which
  47 *   are already referenced or marked as false positives and need to be
  48 *   scanned. This list is only modified during a scanning episode when the
  49 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
  50 *   Note that the kmemleak_object.use_count is incremented when an object is
  51 *   added to the gray_list and therefore cannot be freed. This mutex also
   52 *   prevents concurrent users of the "kmemleak" debugfs file and serializes
   53 *   modifications to the memory scanning parameters, including the scan_thread
   54 *   pointer
  55 *
  56 * The kmemleak_object structures have a use_count incremented or decremented
  57 * using the get_object()/put_object() functions. When the use_count becomes
  58 * 0, this count can no longer be incremented and put_object() schedules the
  59 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
  60 * function must be protected by rcu_read_lock() to avoid accessing a freed
  61 * structure.
  62 */
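/*
 * A minimal sketch of the object lookup protocol described above (see
 * find_and_get_object() below for the actual implementation):
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);	(under kmemleak_lock)
 *	if (object && get_object(object)) {
 *		... access the metadata under object->lock ...
 *		put_object(object);
 *	}
 *	rcu_read_unlock();
 */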
  63
  64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  65
  66#include <linux/init.h>
  67#include <linux/kernel.h>
  68#include <linux/list.h>
  69#include <linux/sched.h>
  70#include <linux/jiffies.h>
  71#include <linux/delay.h>
  72#include <linux/module.h>
  73#include <linux/kthread.h>
  74#include <linux/prio_tree.h>
  75#include <linux/gfp.h>
  76#include <linux/fs.h>
  77#include <linux/debugfs.h>
  78#include <linux/seq_file.h>
  79#include <linux/cpumask.h>
  80#include <linux/spinlock.h>
  81#include <linux/mutex.h>
  82#include <linux/rcupdate.h>
  83#include <linux/stacktrace.h>
  84#include <linux/cache.h>
  85#include <linux/percpu.h>
  86#include <linux/hardirq.h>
  87#include <linux/mmzone.h>
  88#include <linux/slab.h>
  89#include <linux/thread_info.h>
  90#include <linux/err.h>
  91#include <linux/uaccess.h>
  92#include <linux/string.h>
  93#include <linux/nodemask.h>
  94#include <linux/mm.h>
  95#include <linux/workqueue.h>
  96
  97#include <asm/sections.h>
  98#include <asm/processor.h>
  99#include <asm/atomic.h>
 100
 101#include <linux/kmemcheck.h>
 102#include <linux/kmemleak.h>
 103
 104/*
 105 * Kmemleak configuration and common defines.
 106 */
 107#define MAX_TRACE               16      /* stack trace length */
 108#define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
 109#define SECS_FIRST_SCAN         60      /* delay before the first scan */
 110#define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */
 111#define GRAY_LIST_PASSES        25      /* maximum number of gray list scans */
 112#define MAX_SCAN_SIZE           4096    /* maximum size of a scanned block */
 113
 114#define BYTES_PER_POINTER       sizeof(void *)
 115
 116/* GFP bitmask for kmemleak internal allocations */
 117#define GFP_KMEMLEAK_MASK       (GFP_KERNEL | GFP_ATOMIC)
 118
 119/* scanning area inside a memory block */
 120struct kmemleak_scan_area {
 121        struct hlist_node node;
 122        unsigned long offset;
 123        size_t length;
 124};
 125
 126#define KMEMLEAK_GREY   0
 127#define KMEMLEAK_BLACK  -1
 128
 129/*
 130 * Structure holding the metadata for each allocated memory block.
 131 * Modifications to such objects should be made while holding the
 132 * object->lock. Insertions or deletions from object_list, gray_list or
 133 * tree_node are already protected by the corresponding locks or mutex (see
 134 * the notes on locking above). These objects are reference-counted
 135 * (use_count) and freed using the RCU mechanism.
 136 */
 137struct kmemleak_object {
 138        spinlock_t lock;
 139        unsigned long flags;            /* object status flags */
 140        struct list_head object_list;
 141        struct list_head gray_list;
 142        struct prio_tree_node tree_node;
 143        struct rcu_head rcu;            /* object_list lockless traversal */
 144        /* object usage count; object freed when use_count == 0 */
 145        atomic_t use_count;
 146        unsigned long pointer;
 147        size_t size;
  148        /* minimum number of pointers found before it is considered a leak */
 149        int min_count;
 150        /* the total number of pointers found pointing to this object */
 151        int count;
 152        /* memory ranges to be scanned inside an object (empty for all) */
 153        struct hlist_head area_list;
 154        unsigned long trace[MAX_TRACE];
 155        unsigned int trace_len;
 156        unsigned long jiffies;          /* creation timestamp */
 157        pid_t pid;                      /* pid of the current task */
 158        char comm[TASK_COMM_LEN];       /* executable name */
 159};
 160
 161/* flag representing the memory block allocation status */
 162#define OBJECT_ALLOCATED        (1 << 0)
  163/* flag set after the first reporting of an unreferenced object */
 164#define OBJECT_REPORTED         (1 << 1)
 165/* flag set to not scan the object */
 166#define OBJECT_NO_SCAN          (1 << 2)
 167/* flag set on newly allocated objects */
 168#define OBJECT_NEW              (1 << 3)
 169
 170/* number of bytes to print per line; must be 16 or 32 */
 171#define HEX_ROW_SIZE            16
 172/* number of bytes to print at a time (1, 2, 4, 8) */
 173#define HEX_GROUP_SIZE          1
 174/* include ASCII after the hex output */
 175#define HEX_ASCII               1
 176/* max number of lines to be printed */
 177#define HEX_MAX_LINES           2
 178
 179/* the list of all allocated objects */
 180static LIST_HEAD(object_list);
 181/* the list of gray-colored objects (see color_gray comment below) */
 182static LIST_HEAD(gray_list);
 183/* prio search tree for object boundaries */
 184static struct prio_tree_root object_tree_root;
  185/* rw_lock protecting the access to object_list and object_tree_root */
 186static DEFINE_RWLOCK(kmemleak_lock);
 187
 188/* allocation caches for kmemleak internal data */
 189static struct kmem_cache *object_cache;
 190static struct kmem_cache *scan_area_cache;
 191
 192/* set if tracing memory operations is enabled */
 193static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
 194/* set in the late_initcall if there were no errors */
 195static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
 196/* enables or disables early logging of the memory operations */
 197static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
  198/* set if a fatal kmemleak error has occurred */
 199static atomic_t kmemleak_error = ATOMIC_INIT(0);
 200
 201/* minimum and maximum address that may be valid pointers */
 202static unsigned long min_addr = ULONG_MAX;
 203static unsigned long max_addr;
 204
 205static struct task_struct *scan_thread;
 206/* used to avoid reporting of recently allocated objects */
 207static unsigned long jiffies_min_age;
 208static unsigned long jiffies_last_scan;
 209/* delay between automatic memory scannings */
 210static signed long jiffies_scan_wait;
 211/* enables or disables the task stacks scanning */
 212static int kmemleak_stack_scan = 1;
 213/* protects the memory scanning, parameters and debug/kmemleak file access */
 214static DEFINE_MUTEX(scan_mutex);
 215
 216/*
 217 * Early object allocation/freeing logging. Kmemleak is initialized after the
 218 * kernel allocator. However, both the kernel allocator and kmemleak may
 219 * allocate memory blocks which need to be tracked. Kmemleak defines an
 220 * arbitrary buffer to hold the allocation/freeing information before it is
 221 * fully initialized.
 222 */
 223
 224/* kmemleak operation type for early logging */
 225enum {
 226        KMEMLEAK_ALLOC,
 227        KMEMLEAK_FREE,
 228        KMEMLEAK_FREE_PART,
 229        KMEMLEAK_NOT_LEAK,
 230        KMEMLEAK_IGNORE,
 231        KMEMLEAK_SCAN_AREA,
 232        KMEMLEAK_NO_SCAN
 233};
 234
 235/*
 236 * Structure holding the information passed to kmemleak callbacks during the
 237 * early logging.
 238 */
 239struct early_log {
 240        int op_type;                    /* kmemleak operation type */
 241        const void *ptr;                /* allocated/freed memory block */
 242        size_t size;                    /* memory block size */
 243        int min_count;                  /* minimum reference count */
 244        unsigned long offset;           /* scan area offset */
 245        size_t length;                  /* scan area length */
 246        unsigned long trace[MAX_TRACE]; /* stack trace */
 247        unsigned int trace_len;         /* stack trace length */
 248};
 249
 250/* early logging buffer and current position */
 251static struct early_log
 252        early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
 253static int crt_early_log __initdata;
 254
 255static void kmemleak_disable(void);
 256
 257/*
 258 * Print a warning and dump the stack trace.
 259 */
 260#define kmemleak_warn(x...)     do {    \
 261        pr_warning(x);                  \
 262        dump_stack();                   \
 263} while (0)
 264
 265/*
  266 * Macro invoked when a serious kmemleak condition has occurred and cannot be
  267 * recovered from. Kmemleak will be disabled and further allocation/freeing
  268 * tracing will no longer be available.
 269 */
 270#define kmemleak_stop(x...)     do {    \
 271        kmemleak_warn(x);               \
 272        kmemleak_disable();             \
 273} while (0)
 274
 275/*
  276 * Printing of the object's hex dump to the seq file. The number of lines to be
 277 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 278 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 279 * with the object->lock held.
 280 */
 281static void hex_dump_object(struct seq_file *seq,
 282                            struct kmemleak_object *object)
 283{
 284        const u8 *ptr = (const u8 *)object->pointer;
 285        int i, len, remaining;
 286        unsigned char linebuf[HEX_ROW_SIZE * 5];
 287
 288        /* limit the number of lines to HEX_MAX_LINES */
 289        remaining = len =
 290                min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
 291
 292        seq_printf(seq, "  hex dump (first %d bytes):\n", len);
 293        for (i = 0; i < len; i += HEX_ROW_SIZE) {
 294                int linelen = min(remaining, HEX_ROW_SIZE);
 295
 296                remaining -= HEX_ROW_SIZE;
 297                hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
 298                                   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
 299                                   HEX_ASCII);
 300                seq_printf(seq, "    %s\n", linebuf);
 301        }
 302}
 303
 304/*
 305 * Object colors, encoded with count and min_count:
 306 * - white - orphan object, not enough references to it (count < min_count)
  307 * - gray  - not orphan: either marked as a false positive (min_count == 0) or
  308 *              with sufficient references to it (count >= min_count)
  309 * - black - ignore, it doesn't contain references (e.g. text section)
  310 *              (min_count == -1)
 311 * Newly created objects don't have any color assigned (object->count == -1)
 312 * before the next memory scan when they become white.
 313 */
 314static bool color_white(const struct kmemleak_object *object)
 315{
 316        return object->count != KMEMLEAK_BLACK &&
 317                object->count < object->min_count;
 318}
 319
 320static bool color_gray(const struct kmemleak_object *object)
 321{
 322        return object->min_count != KMEMLEAK_BLACK &&
 323                object->count >= object->min_count;
 324}
 325
 326static bool color_black(const struct kmemleak_object *object)
 327{
 328        return object->min_count == KMEMLEAK_BLACK;
 329}
 330
 331/*
  332 * Objects are considered unreferenced only if their color is white, they have
  333 * not been deleted and have a minimum age to avoid false positives caused by
 334 * pointers temporarily stored in CPU registers.
 335 */
 336static bool unreferenced_object(struct kmemleak_object *object)
 337{
 338        return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
 339                time_before_eq(object->jiffies + jiffies_min_age,
 340                               jiffies_last_scan);
 341}
 342
 343/*
  344 * Printing of the unreferenced object's information to the seq file. The
 345 * print_unreferenced function must be called with the object->lock held.
 346 */
 347static void print_unreferenced(struct seq_file *seq,
 348                               struct kmemleak_object *object)
 349{
 350        int i;
 351
 352        seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
 353                   object->pointer, object->size);
 354        seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
 355                   object->comm, object->pid, object->jiffies);
 356        hex_dump_object(seq, object);
 357        seq_printf(seq, "  backtrace:\n");
 358
 359        for (i = 0; i < object->trace_len; i++) {
 360                void *ptr = (void *)object->trace[i];
 361                seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
 362        }
 363}
 364
 365/*
  366 * Print the kmemleak_object information. This function is used mainly for
  367 * debugging special cases of kmemleak operations. It must be called with
 368 * the object->lock held.
 369 */
 370static void dump_object_info(struct kmemleak_object *object)
 371{
 372        struct stack_trace trace;
 373
 374        trace.nr_entries = object->trace_len;
 375        trace.entries = object->trace;
 376
 377        pr_notice("Object 0x%08lx (size %zu):\n",
 378                  object->tree_node.start, object->size);
 379        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
 380                  object->comm, object->pid, object->jiffies);
 381        pr_notice("  min_count = %d\n", object->min_count);
 382        pr_notice("  count = %d\n", object->count);
 383        pr_notice("  flags = 0x%lx\n", object->flags);
 384        pr_notice("  backtrace:\n");
 385        print_stack_trace(&trace, 4);
 386}
 387
 388/*
  389 * Look up a memory block's metadata (kmemleak_object) in the priority search
 390 * tree based on a pointer value. If alias is 0, only values pointing to the
 391 * beginning of the memory block are allowed. The kmemleak_lock must be held
 392 * when calling this function.
 393 */
 394static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 395{
 396        struct prio_tree_node *node;
 397        struct prio_tree_iter iter;
 398        struct kmemleak_object *object;
 399
 400        prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
 401        node = prio_tree_next(&iter);
 402        if (node) {
 403                object = prio_tree_entry(node, struct kmemleak_object,
 404                                         tree_node);
 405                if (!alias && object->pointer != ptr) {
 406                        kmemleak_warn("Found object by alias");
 407                        object = NULL;
 408                }
 409        } else
 410                object = NULL;
 411
 412        return object;
 413}
 414
 415/*
 416 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
  417 * that once an object's use_count has reached 0, the RCU freeing has already
  418 * been registered and the object should no longer be used. This function must be
 419 * called under the protection of rcu_read_lock().
 420 */
 421static int get_object(struct kmemleak_object *object)
 422{
 423        return atomic_inc_not_zero(&object->use_count);
 424}
 425
 426/*
 427 * RCU callback to free a kmemleak_object.
 428 */
 429static void free_object_rcu(struct rcu_head *rcu)
 430{
 431        struct hlist_node *elem, *tmp;
 432        struct kmemleak_scan_area *area;
 433        struct kmemleak_object *object =
 434                container_of(rcu, struct kmemleak_object, rcu);
 435
 436        /*
 437         * Once use_count is 0 (guaranteed by put_object), there is no other
 438         * code accessing this object, hence no need for locking.
 439         */
 440        hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
 441                hlist_del(elem);
 442                kmem_cache_free(scan_area_cache, area);
 443        }
 444        kmem_cache_free(object_cache, object);
 445}
 446
 447/*
 448 * Decrement the object use_count. Once the count is 0, free the object using
 449 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 450 * delete_object() path, the delayed RCU freeing ensures that there is no
 451 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 452 * is also possible.
 453 */
 454static void put_object(struct kmemleak_object *object)
 455{
 456        if (!atomic_dec_and_test(&object->use_count))
 457                return;
 458
 459        /* should only get here after delete_object was called */
 460        WARN_ON(object->flags & OBJECT_ALLOCATED);
 461
 462        call_rcu(&object->rcu, free_object_rcu);
 463}
 464
 465/*
 466 * Look up an object in the prio search tree and increase its use_count.
 467 */
 468static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 469{
 470        unsigned long flags;
 471        struct kmemleak_object *object = NULL;
 472
 473        rcu_read_lock();
 474        read_lock_irqsave(&kmemleak_lock, flags);
 475        if (ptr >= min_addr && ptr < max_addr)
 476                object = lookup_object(ptr, alias);
 477        read_unlock_irqrestore(&kmemleak_lock, flags);
 478
 479        /* check whether the object is still available */
 480        if (object && !get_object(object))
 481                object = NULL;
 482        rcu_read_unlock();
 483
 484        return object;
 485}
 486
 487/*
 488 * Save stack trace to the given array of MAX_TRACE size.
 489 */
 490static int __save_stack_trace(unsigned long *trace)
 491{
 492        struct stack_trace stack_trace;
 493
 494        stack_trace.max_entries = MAX_TRACE;
 495        stack_trace.nr_entries = 0;
 496        stack_trace.entries = trace;
 497        stack_trace.skip = 2;
 498        save_stack_trace(&stack_trace);
 499
 500        return stack_trace.nr_entries;
 501}
 502
 503/*
 504 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 505 * memory block and add it to the object_list and object_tree_root.
 506 */
 507static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 508                                             int min_count, gfp_t gfp)
 509{
 510        unsigned long flags;
 511        struct kmemleak_object *object;
 512        struct prio_tree_node *node;
 513
 514        object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
 515        if (!object) {
 516                kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
 517                return NULL;
 518        }
 519
 520        INIT_LIST_HEAD(&object->object_list);
 521        INIT_LIST_HEAD(&object->gray_list);
 522        INIT_HLIST_HEAD(&object->area_list);
 523        spin_lock_init(&object->lock);
 524        atomic_set(&object->use_count, 1);
 525        object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
 526        object->pointer = ptr;
 527        object->size = size;
 528        object->min_count = min_count;
 529        object->count = -1;                     /* no color initially */
 530        object->jiffies = jiffies;
 531
 532        /* task information */
 533        if (in_irq()) {
 534                object->pid = 0;
 535                strncpy(object->comm, "hardirq", sizeof(object->comm));
 536        } else if (in_softirq()) {
 537                object->pid = 0;
 538                strncpy(object->comm, "softirq", sizeof(object->comm));
 539        } else {
 540                object->pid = current->pid;
 541                /*
 542                 * There is a small chance of a race with set_task_comm(),
 543                 * however using get_task_comm() here may cause locking
  544                 * dependency issues with current->alloc_lock. In the worst
  545                 * case, the recorded command name is not correct.
 546                 */
 547                strncpy(object->comm, current->comm, sizeof(object->comm));
 548        }
 549
 550        /* kernel backtrace */
 551        object->trace_len = __save_stack_trace(object->trace);
 552
 553        INIT_PRIO_TREE_NODE(&object->tree_node);
 554        object->tree_node.start = ptr;
 555        object->tree_node.last = ptr + size - 1;
 556
 557        write_lock_irqsave(&kmemleak_lock, flags);
 558
 559        min_addr = min(min_addr, ptr);
 560        max_addr = max(max_addr, ptr + size);
 561        node = prio_tree_insert(&object_tree_root, &object->tree_node);
 562        /*
 563         * The code calling the kernel does not yet have the pointer to the
 564         * memory block to be able to free it.  However, we still hold the
 565         * kmemleak_lock here in case parts of the kernel started freeing
 566         * random memory blocks.
 567         */
 568        if (node != &object->tree_node) {
 569                kmemleak_stop("Cannot insert 0x%lx into the object search tree "
 570                              "(already existing)\n", ptr);
 571                object = lookup_object(ptr, 1);
 572                spin_lock(&object->lock);
 573                dump_object_info(object);
 574                spin_unlock(&object->lock);
 575
 576                goto out;
 577        }
 578        list_add_tail_rcu(&object->object_list, &object_list);
 579out:
 580        write_unlock_irqrestore(&kmemleak_lock, flags);
 581        return object;
 582}
 583
 584/*
 585 * Remove the metadata (struct kmemleak_object) for a memory block from the
 586 * object_list and object_tree_root and decrement its use_count.
 587 */
 588static void __delete_object(struct kmemleak_object *object)
 589{
 590        unsigned long flags;
 591
 592        write_lock_irqsave(&kmemleak_lock, flags);
 593        prio_tree_remove(&object_tree_root, &object->tree_node);
 594        list_del_rcu(&object->object_list);
 595        write_unlock_irqrestore(&kmemleak_lock, flags);
 596
 597        WARN_ON(!(object->flags & OBJECT_ALLOCATED));
 598        WARN_ON(atomic_read(&object->use_count) < 2);
 599
 600        /*
 601         * Locking here also ensures that the corresponding memory block
 602         * cannot be freed when it is being scanned.
 603         */
 604        spin_lock_irqsave(&object->lock, flags);
 605        object->flags &= ~OBJECT_ALLOCATED;
 606        spin_unlock_irqrestore(&object->lock, flags);
 607        put_object(object);
 608}
 609
 610/*
 611 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 612 * delete it.
 613 */
 614static void delete_object_full(unsigned long ptr)
 615{
 616        struct kmemleak_object *object;
 617
 618        object = find_and_get_object(ptr, 0);
 619        if (!object) {
 620#ifdef DEBUG
 621                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
 622                              ptr);
 623#endif
 624                return;
 625        }
 626        __delete_object(object);
 627        put_object(object);
 628}
 629
 630/*
 631 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 632 * delete it. If the memory block is partially freed, the function may create
 633 * additional metadata for the remaining parts of the block.
 634 */
 635static void delete_object_part(unsigned long ptr, size_t size)
 636{
 637        struct kmemleak_object *object;
 638        unsigned long start, end;
 639
 640        object = find_and_get_object(ptr, 1);
 641        if (!object) {
 642#ifdef DEBUG
 643                kmemleak_warn("Partially freeing unknown object at 0x%08lx "
 644                              "(size %zu)\n", ptr, size);
 645#endif
 646                return;
 647        }
 648        __delete_object(object);
 649
 650        /*
 651         * Create one or two objects that may result from the memory block
 652         * split. Note that partial freeing is only done by free_bootmem() and
 653         * this happens before kmemleak_init() is called. The path below is
 654         * only executed during early log recording in kmemleak_init(), so
 655         * GFP_KERNEL is enough.
 656         */
 657        start = object->pointer;
 658        end = object->pointer + object->size;
 659        if (ptr > start)
 660                create_object(start, ptr - start, object->min_count,
 661                              GFP_KERNEL);
 662        if (ptr + size < end)
 663                create_object(ptr + size, end - ptr - size, object->min_count,
 664                              GFP_KERNEL);
 665
 666        put_object(object);
 667}
 668
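/*
 * Set the color of an object by overriding its min_count. Painting an object
 * KMEMLEAK_BLACK also sets OBJECT_NO_SCAN so that the corresponding memory
 * block is skipped during scanning.
 */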
 669static void __paint_it(struct kmemleak_object *object, int color)
 670{
 671        object->min_count = color;
 672        if (color == KMEMLEAK_BLACK)
 673                object->flags |= OBJECT_NO_SCAN;
 674}
 675
 676static void paint_it(struct kmemleak_object *object, int color)
 677{
 678        unsigned long flags;
 679
 680        spin_lock_irqsave(&object->lock, flags);
 681        __paint_it(object, color);
 682        spin_unlock_irqrestore(&object->lock, flags);
 683}
 684
 685static void paint_ptr(unsigned long ptr, int color)
 686{
 687        struct kmemleak_object *object;
 688
 689        object = find_and_get_object(ptr, 0);
 690        if (!object) {
 691                kmemleak_warn("Trying to color unknown object "
 692                              "at 0x%08lx as %s\n", ptr,
 693                              (color == KMEMLEAK_GREY) ? "Grey" :
 694                              (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
 695                return;
 696        }
 697        paint_it(object, color);
 698        put_object(object);
 699}
 700
 701/*
  702 * Mark an object permanently as gray-colored so that it can no longer be
 703 * reported as a leak. This is used in general to mark a false positive.
 704 */
 705static void make_gray_object(unsigned long ptr)
 706{
 707        paint_ptr(ptr, KMEMLEAK_GREY);
 708}
 709
 710/*
  711 * Mark the object as black-colored so that it is ignored during memory
  712 * scanning and never reported.
 713 */
 714static void make_black_object(unsigned long ptr)
 715{
 716        paint_ptr(ptr, KMEMLEAK_BLACK);
 717}
 718
 719/*
 720 * Add a scanning area to the object. If at least one such area is added,
 721 * kmemleak will only scan these ranges rather than the whole memory block.
 722 */
 723static void add_scan_area(unsigned long ptr, unsigned long offset,
 724                          size_t length, gfp_t gfp)
 725{
 726        unsigned long flags;
 727        struct kmemleak_object *object;
 728        struct kmemleak_scan_area *area;
 729
 730        object = find_and_get_object(ptr, 0);
 731        if (!object) {
 732                kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
 733                              ptr);
 734                return;
 735        }
 736
 737        area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
 738        if (!area) {
 739                kmemleak_warn("Cannot allocate a scan area\n");
 740                goto out;
 741        }
 742
 743        spin_lock_irqsave(&object->lock, flags);
 744        if (offset + length > object->size) {
 745                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 746                dump_object_info(object);
 747                kmem_cache_free(scan_area_cache, area);
 748                goto out_unlock;
 749        }
 750
 751        INIT_HLIST_NODE(&area->node);
 752        area->offset = offset;
 753        area->length = length;
 754
 755        hlist_add_head(&area->node, &object->area_list);
 756out_unlock:
 757        spin_unlock_irqrestore(&object->lock, flags);
 758out:
 759        put_object(object);
 760}
 761
 762/*
  763 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
  764 * pointer. Such an object will not be scanned by kmemleak, but references to
  765 * it are still searched for.
 766 */
 767static void object_no_scan(unsigned long ptr)
 768{
 769        unsigned long flags;
 770        struct kmemleak_object *object;
 771
 772        object = find_and_get_object(ptr, 0);
 773        if (!object) {
 774                kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
 775                return;
 776        }
 777
 778        spin_lock_irqsave(&object->lock, flags);
 779        object->flags |= OBJECT_NO_SCAN;
 780        spin_unlock_irqrestore(&object->lock, flags);
 781        put_object(object);
 782}
 783
 784/*
 785 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 786 * processed later once kmemleak is fully initialized.
 787 */
 788static void __init log_early(int op_type, const void *ptr, size_t size,
 789                             int min_count, unsigned long offset, size_t length)
 790{
 791        unsigned long flags;
 792        struct early_log *log;
 793
 794        if (crt_early_log >= ARRAY_SIZE(early_log)) {
 795                pr_warning("Early log buffer exceeded, "
 796                           "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
 797                kmemleak_disable();
 798                return;
 799        }
 800
 801        /*
 802         * There is no need for locking since the kernel is still in UP mode
 803         * at this stage. Disabling the IRQs is enough.
 804         */
 805        local_irq_save(flags);
 806        log = &early_log[crt_early_log];
 807        log->op_type = op_type;
 808        log->ptr = ptr;
 809        log->size = size;
 810        log->min_count = min_count;
 811        log->offset = offset;
 812        log->length = length;
 813        if (op_type == KMEMLEAK_ALLOC)
 814                log->trace_len = __save_stack_trace(log->trace);
 815        crt_early_log++;
 816        local_irq_restore(flags);
 817}
 818
 819/*
  820 * Create the metadata for an early allocated block and restore its stack trace.
 821 */
 822static void early_alloc(struct early_log *log)
 823{
 824        struct kmemleak_object *object;
 825        unsigned long flags;
 826        int i;
 827
 828        if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
 829                return;
 830
 831        /*
 832         * RCU locking needed to ensure object is not freed via put_object().
 833         */
 834        rcu_read_lock();
 835        object = create_object((unsigned long)log->ptr, log->size,
 836                               log->min_count, GFP_ATOMIC);
 837        if (!object)
 838                goto out;
 839        spin_lock_irqsave(&object->lock, flags);
 840        for (i = 0; i < log->trace_len; i++)
 841                object->trace[i] = log->trace[i];
 842        object->trace_len = log->trace_len;
 843        spin_unlock_irqrestore(&object->lock, flags);
 844out:
 845        rcu_read_unlock();
 846}
 847
 848/*
 849 * Memory allocation function callback. This function is called from the
 850 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 851 * vmalloc etc.).
 852 */
 853void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 854                          gfp_t gfp)
 855{
 856        pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
 857
 858        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 859                create_object((unsigned long)ptr, size, min_count, gfp);
 860        else if (atomic_read(&kmemleak_early_log))
 861                log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
 862}
 863EXPORT_SYMBOL_GPL(kmemleak_alloc);
 864
 865/*
 866 * Memory freeing function callback. This function is called from the kernel
 867 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 868 */
 869void __ref kmemleak_free(const void *ptr)
 870{
 871        pr_debug("%s(0x%p)\n", __func__, ptr);
 872
 873        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 874                delete_object_full((unsigned long)ptr);
 875        else if (atomic_read(&kmemleak_early_log))
 876                log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
 877}
 878EXPORT_SYMBOL_GPL(kmemleak_free);
 879
 880/*
 881 * Partial memory freeing function callback. This function is usually called
  882 * from the bootmem allocator when (part of) a memory block is freed.
 883 */
 884void __ref kmemleak_free_part(const void *ptr, size_t size)
 885{
 886        pr_debug("%s(0x%p)\n", __func__, ptr);
 887
 888        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 889                delete_object_part((unsigned long)ptr, size);
 890        else if (atomic_read(&kmemleak_early_log))
 891                log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
 892}
 893EXPORT_SYMBOL_GPL(kmemleak_free_part);
 894
 895/*
 896 * Mark an already allocated memory block as a false positive. This will cause
  897 * the block to no longer be reported as a leak and always be scanned.
 898 */
 899void __ref kmemleak_not_leak(const void *ptr)
 900{
 901        pr_debug("%s(0x%p)\n", __func__, ptr);
 902
 903        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 904                make_gray_object((unsigned long)ptr);
 905        else if (atomic_read(&kmemleak_early_log))
 906                log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
 907}
 908EXPORT_SYMBOL(kmemleak_not_leak);
 909
 910/*
 911 * Ignore a memory block. This is usually done when it is known that the
 912 * corresponding block is not a leak and does not contain any references to
 913 * other allocated memory blocks.
 914 */
 915void __ref kmemleak_ignore(const void *ptr)
 916{
 917        pr_debug("%s(0x%p)\n", __func__, ptr);
 918
 919        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 920                make_black_object((unsigned long)ptr);
 921        else if (atomic_read(&kmemleak_early_log))
 922                log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
 923}
 924EXPORT_SYMBOL(kmemleak_ignore);
 925
 926/*
 927 * Limit the range to be scanned in an allocated memory block.
 928 */
 929void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
 930                              size_t length, gfp_t gfp)
 931{
 932        pr_debug("%s(0x%p)\n", __func__, ptr);
 933
 934        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 935                add_scan_area((unsigned long)ptr, offset, length, gfp);
 936        else if (atomic_read(&kmemleak_early_log))
 937                log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
 938}
 939EXPORT_SYMBOL(kmemleak_scan_area);
 940
 941/*
 942 * Inform kmemleak not to scan the given memory block.
 943 */
 944void __ref kmemleak_no_scan(const void *ptr)
 945{
 946        pr_debug("%s(0x%p)\n", __func__, ptr);
 947
 948        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 949                object_no_scan((unsigned long)ptr);
 950        else if (atomic_read(&kmemleak_early_log))
 951                log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
 952}
 953EXPORT_SYMBOL(kmemleak_no_scan);
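/*
 * A minimal, hypothetical example of how other kernel code might use the
 * annotations above (the variable names below are illustrative only):
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_not_leak(obj);		- the only reference to obj is handed
 *					  to hardware, so never report it
 *	buf = kmalloc(len, GFP_KERNEL);
 *	kmemleak_no_scan(buf);		- buf contains no pointers to other
 *					  objects, so do not scan it
 */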
 954
 955/*
  956 * Memory scanning is a long process and it needs to be interruptible. This
  957 * function checks whether such an interrupt condition has occurred.
 958 */
 959static int scan_should_stop(void)
 960{
 961        if (!atomic_read(&kmemleak_enabled))
 962                return 1;
 963
 964        /*
 965         * This function may be called from either process or kthread context,
 966         * hence the need to check for both stop conditions.
 967         */
 968        if (current->mm)
 969                return signal_pending(current);
 970        else
 971                return kthread_should_stop();
 972
 973        return 0;
 974}
 975
 976/*
 977 * Scan a memory block (exclusive range) for valid pointers and add those
 978 * found to the gray list.
 979 */
 980static void scan_block(void *_start, void *_end,
 981                       struct kmemleak_object *scanned, int allow_resched)
 982{
 983        unsigned long *ptr;
 984        unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
 985        unsigned long *end = _end - (BYTES_PER_POINTER - 1);
 986
 987        for (ptr = start; ptr < end; ptr++) {
 988                struct kmemleak_object *object;
 989                unsigned long flags;
 990                unsigned long pointer;
 991
 992                if (allow_resched)
 993                        cond_resched();
 994                if (scan_should_stop())
 995                        break;
 996
 997                /* don't scan uninitialized memory */
 998                if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
 999                                                  BYTES_PER_POINTER))
1000                        continue;
1001
1002                pointer = *ptr;
1003
1004                object = find_and_get_object(pointer, 1);
1005                if (!object)
1006                        continue;
1007                if (object == scanned) {
1008                        /* self referenced, ignore */
1009                        put_object(object);
1010                        continue;
1011                }
1012
1013                /*
1014                 * Avoid the lockdep recursive warning on object->lock being
1015                 * previously acquired in scan_object(). These locks are
1016                 * enclosed by scan_mutex.
1017                 */
1018                spin_lock_irqsave_nested(&object->lock, flags,
1019                                         SINGLE_DEPTH_NESTING);
1020                if (!color_white(object)) {
1021                        /* non-orphan, ignored or new */
1022                        spin_unlock_irqrestore(&object->lock, flags);
1023                        put_object(object);
1024                        continue;
1025                }
1026
1027                /*
1028                 * Increase the object's reference count (number of pointers
1029                 * to the memory block). If this count reaches the required
1030                 * minimum, the object's color will become gray and it will be
1031                 * added to the gray_list.
1032                 */
1033                object->count++;
1034                if (color_gray(object))
1035                        list_add_tail(&object->gray_list, &gray_list);
1036                else
1037                        put_object(object);
1038                spin_unlock_irqrestore(&object->lock, flags);
1039        }
1040}
1041
1042/*
1043 * Scan a memory block corresponding to a kmemleak_object. A condition is
1044 * that object->use_count >= 1.
1045 */
1046static void scan_object(struct kmemleak_object *object)
1047{
1048        struct kmemleak_scan_area *area;
1049        struct hlist_node *elem;
1050        unsigned long flags;
1051
1052        /*
 1053         * Once the object->lock is acquired, the corresponding memory block
 1054         * cannot be freed (the same lock is acquired in delete_object).
1055         */
1056        spin_lock_irqsave(&object->lock, flags);
1057        if (object->flags & OBJECT_NO_SCAN)
1058                goto out;
1059        if (!(object->flags & OBJECT_ALLOCATED))
1060                /* already freed object */
1061                goto out;
1062        if (hlist_empty(&object->area_list)) {
1063                void *start = (void *)object->pointer;
1064                void *end = (void *)(object->pointer + object->size);
1065
1066                while (start < end && (object->flags & OBJECT_ALLOCATED) &&
1067                       !(object->flags & OBJECT_NO_SCAN)) {
1068                        scan_block(start, min(start + MAX_SCAN_SIZE, end),
1069                                   object, 0);
1070                        start += MAX_SCAN_SIZE;
1071
1072                        spin_unlock_irqrestore(&object->lock, flags);
1073                        cond_resched();
1074                        spin_lock_irqsave(&object->lock, flags);
1075                }
1076        } else
1077                hlist_for_each_entry(area, elem, &object->area_list, node)
1078                        scan_block((void *)(object->pointer + area->offset),
1079                                   (void *)(object->pointer + area->offset
1080                                            + area->length), object, 0);
1081out:
1082        spin_unlock_irqrestore(&object->lock, flags);
1083}
1084
1085/*
1086 * Scan data sections and all the referenced memory blocks allocated via the
1087 * kernel's standard allocators. This function must be called with the
1088 * scan_mutex held.
1089 */
1090static void kmemleak_scan(void)
1091{
1092        unsigned long flags;
1093        struct kmemleak_object *object, *tmp;
1094        int i;
1095        int new_leaks = 0;
1096        int gray_list_pass = 0;
1097
1098        jiffies_last_scan = jiffies;
1099
 1100        /* prepare the kmemleak_object structures */
1101        rcu_read_lock();
1102        list_for_each_entry_rcu(object, &object_list, object_list) {
1103                spin_lock_irqsave(&object->lock, flags);
1104#ifdef DEBUG
1105                /*
1106                 * With a few exceptions there should be a maximum of
1107                 * 1 reference to any object at this point.
1108                 */
1109                if (atomic_read(&object->use_count) > 1) {
1110                        pr_debug("object->use_count = %d\n",
1111                                 atomic_read(&object->use_count));
1112                        dump_object_info(object);
1113                }
1114#endif
1115                /* reset the reference count (whiten the object) */
1116                object->count = 0;
1117                object->flags &= ~OBJECT_NEW;
1118                if (color_gray(object) && get_object(object))
1119                        list_add_tail(&object->gray_list, &gray_list);
1120
1121                spin_unlock_irqrestore(&object->lock, flags);
1122        }
1123        rcu_read_unlock();
1124
1125        /* data/bss scanning */
1126        scan_block(_sdata, _edata, NULL, 1);
1127        scan_block(__bss_start, __bss_stop, NULL, 1);
1128
1129#ifdef CONFIG_SMP
1130        /* per-cpu sections scanning */
1131        for_each_possible_cpu(i)
1132                scan_block(__per_cpu_start + per_cpu_offset(i),
1133                           __per_cpu_end + per_cpu_offset(i), NULL, 1);
1134#endif
1135
1136        /*
1137         * Struct page scanning for each node. The code below is not yet safe
1138         * with MEMORY_HOTPLUG.
1139         */
1140        for_each_online_node(i) {
1141                pg_data_t *pgdat = NODE_DATA(i);
1142                unsigned long start_pfn = pgdat->node_start_pfn;
1143                unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
1144                unsigned long pfn;
1145
1146                for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1147                        struct page *page;
1148
1149                        if (!pfn_valid(pfn))
1150                                continue;
1151                        page = pfn_to_page(pfn);
1152                        /* only scan if page is in use */
1153                        if (page_count(page) == 0)
1154                                continue;
1155                        scan_block(page, page + 1, NULL, 1);
1156                }
1157        }
1158
1159        /*
1160         * Scanning the task stacks (may introduce false negatives).
1161         */
1162        if (kmemleak_stack_scan) {
1163                struct task_struct *p, *g;
1164
1165                read_lock(&tasklist_lock);
1166                do_each_thread(g, p) {
1167                        scan_block(task_stack_page(p), task_stack_page(p) +
1168                                   THREAD_SIZE, NULL, 0);
1169                } while_each_thread(g, p);
1170                read_unlock(&tasklist_lock);
1171        }
1172
1173        /*
1174         * Scan the objects already referenced from the sections scanned
1175         * above. More objects will be referenced and, if there are no memory
1176         * leaks, all the objects will be scanned. The list traversal is safe
1177         * for both tail additions and removals from inside the loop. The
1178         * kmemleak objects cannot be freed from outside the loop because their
1179         * use_count was increased.
1180         */
1181repeat:
1182        object = list_entry(gray_list.next, typeof(*object), gray_list);
1183        while (&object->gray_list != &gray_list) {
1184                cond_resched();
1185
1186                /* may add new objects to the list */
1187                if (!scan_should_stop())
1188                        scan_object(object);
1189
1190                tmp = list_entry(object->gray_list.next, typeof(*object),
1191                                 gray_list);
1192
1193                /* remove the object from the list and release it */
1194                list_del(&object->gray_list);
1195                put_object(object);
1196
1197                object = tmp;
1198        }
1199
1200        if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
1201                goto scan_end;
1202
1203        /*
1204         * Check for new objects allocated during this scanning and add them
1205         * to the gray list.
1206         */
1207        rcu_read_lock();
1208        list_for_each_entry_rcu(object, &object_list, object_list) {
1209                spin_lock_irqsave(&object->lock, flags);
1210                if ((object->flags & OBJECT_NEW) && !color_black(object) &&
1211                    get_object(object)) {
1212                        object->flags &= ~OBJECT_NEW;
1213                        list_add_tail(&object->gray_list, &gray_list);
1214                }
1215                spin_unlock_irqrestore(&object->lock, flags);
1216        }
1217        rcu_read_unlock();
1218
1219        if (!list_empty(&gray_list))
1220                goto repeat;
1221
1222scan_end:
1223        WARN_ON(!list_empty(&gray_list));
1224
1225        /*
1226         * If scanning was stopped or new objects were being allocated at a
1227         * higher rate than gray list scanning, do not report any new
1228         * unreferenced objects.
1229         */
1230        if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
1231                return;
1232
1233        /*
1234         * Scanning result reporting.
1235         */
1236        rcu_read_lock();
1237        list_for_each_entry_rcu(object, &object_list, object_list) {
1238                spin_lock_irqsave(&object->lock, flags);
1239                if (unreferenced_object(object) &&
1240                    !(object->flags & OBJECT_REPORTED)) {
1241                        object->flags |= OBJECT_REPORTED;
1242                        new_leaks++;
1243                }
1244                spin_unlock_irqrestore(&object->lock, flags);
1245        }
1246        rcu_read_unlock();
1247
1248        if (new_leaks)
1249                pr_info("%d new suspected memory leaks (see "
1250                        "/sys/kernel/debug/kmemleak)\n", new_leaks);
1251
1252}
1253
1254/*
1255 * Thread function performing automatic memory scanning. Unreferenced objects
1256 * at the end of a memory scan are reported but only the first time.
1257 */
1258static int kmemleak_scan_thread(void *arg)
1259{
1260        static int first_run = 1;
1261
1262        pr_info("Automatic memory scanning thread started\n");
1263        set_user_nice(current, 10);
1264
1265        /*
1266         * Wait before the first scan to allow the system to fully initialize.
1267         */
1268        if (first_run) {
1269                first_run = 0;
1270                ssleep(SECS_FIRST_SCAN);
1271        }
1272
1273        while (!kthread_should_stop()) {
1274                signed long timeout = jiffies_scan_wait;
1275
1276                mutex_lock(&scan_mutex);
1277                kmemleak_scan();
1278                mutex_unlock(&scan_mutex);
1279
1280                /* wait before the next scan */
1281                while (timeout && !kthread_should_stop())
1282                        timeout = schedule_timeout_interruptible(timeout);
1283        }
1284
1285        pr_info("Automatic memory scanning thread ended\n");
1286
1287        return 0;
1288}
1289
1290/*
1291 * Start the automatic memory scanning thread. This function must be called
1292 * with the scan_mutex held.
1293 */
1294static void start_scan_thread(void)
1295{
1296        if (scan_thread)
1297                return;
1298        scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1299        if (IS_ERR(scan_thread)) {
1300                pr_warning("Failed to create the scan thread\n");
1301                scan_thread = NULL;
1302        }
1303}
1304
1305/*
1306 * Stop the automatic memory scanning thread. This function must be called
1307 * with the scan_mutex held.
1308 */
1309static void stop_scan_thread(void)
1310{
1311        if (scan_thread) {
1312                kthread_stop(scan_thread);
1313                scan_thread = NULL;
1314        }
1315}
1316
1317/*
 1318 * Iterate over the object_list and return the first valid object at or after
 1319 * the required position with its use_count incremented. The scan_mutex is
 1320 * acquired here and only released in kmemleak_seq_stop().
1321 */
1322static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1323{
1324        struct kmemleak_object *object;
1325        loff_t n = *pos;
1326        int err;
1327
1328        err = mutex_lock_interruptible(&scan_mutex);
1329        if (err < 0)
1330                return ERR_PTR(err);
1331
1332        rcu_read_lock();
1333        list_for_each_entry_rcu(object, &object_list, object_list) {
1334                if (n-- > 0)
1335                        continue;
1336                if (get_object(object))
1337                        goto out;
1338        }
1339        object = NULL;
1340out:
1341        return object;
1342}
1343
1344/*
1345 * Return the next object in the object_list. The function decrements the
1346 * use_count of the previous object and increases that of the next one.
1347 */
1348static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1349{
1350        struct kmemleak_object *prev_obj = v;
1351        struct kmemleak_object *next_obj = NULL;
1352        struct list_head *n = &prev_obj->object_list;
1353
1354        ++(*pos);
1355
1356        list_for_each_continue_rcu(n, &object_list) {
1357                next_obj = list_entry(n, struct kmemleak_object, object_list);
1358                if (get_object(next_obj))
1359                        break;
1360        }
1361
1362        put_object(prev_obj);
1363        return next_obj;
1364}
1365
1366/*
1367 * Decrement the use_count of the last object required, if any.
1368 */
1369static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1370{
1371        if (!IS_ERR(v)) {
1372                /*
1373                 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1374                 * waiting was interrupted, so only release it if !IS_ERR.
1375                 */
1376                rcu_read_unlock();
1377                mutex_unlock(&scan_mutex);
1378                if (v)
1379                        put_object(v);
1380        }
1381}
1382
1383/*
1384 * Print the information for an unreferenced object to the seq file.
1385 */
1386static int kmemleak_seq_show(struct seq_file *seq, void *v)
1387{
1388        struct kmemleak_object *object = v;
1389        unsigned long flags;
1390
1391        spin_lock_irqsave(&object->lock, flags);
1392        if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1393                print_unreferenced(seq, object);
1394        spin_unlock_irqrestore(&object->lock, flags);
1395        return 0;
1396}
1397
1398static const struct seq_operations kmemleak_seq_ops = {
1399        .start = kmemleak_seq_start,
1400        .next  = kmemleak_seq_next,
1401        .stop  = kmemleak_seq_stop,
1402        .show  = kmemleak_seq_show,
1403};
1404
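/*
 * Open the "kmemleak" debugfs file. Access is only allowed while kmemleak is
 * enabled.
 */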
1405static int kmemleak_open(struct inode *inode, struct file *file)
1406{
1407        if (!atomic_read(&kmemleak_enabled))
1408                return -EBUSY;
1409
1410        return seq_open(file, &kmemleak_seq_ops);
1411}
1412
1413static int kmemleak_release(struct inode *inode, struct file *file)
1414{
1415        return seq_release(inode, file);
1416}
1417
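/*
 * Dump the information for the object found at the address given as a string
 * (used by the "dump=..." debugfs command below).
 */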
1418static int dump_str_object_info(const char *str)
1419{
1420        unsigned long flags;
1421        struct kmemleak_object *object;
1422        unsigned long addr;
1423
 1424        addr = simple_strtoul(str, NULL, 0);
1425        object = find_and_get_object(addr, 0);
1426        if (!object) {
1427                pr_info("Unknown object at 0x%08lx\n", addr);
1428                return -EINVAL;
1429        }
1430
1431        spin_lock_irqsave(&object->lock, flags);
1432        dump_object_info(object);
1433        spin_unlock_irqrestore(&object->lock, flags);
1434
1435        put_object(object);
1436        return 0;
1437}
1438
1439/*
1440 * We use grey instead of black to ensure we can do future scans on the same
1441 * objects. If we did not do future scans these black objects could
1442 * potentially contain references to newly allocated objects in the future and
1443 * we'd end up with false positives.
1444 */
1445static void kmemleak_clear(void)
1446{
1447        struct kmemleak_object *object;
1448        unsigned long flags;
1449
1450        rcu_read_lock();
1451        list_for_each_entry_rcu(object, &object_list, object_list) {
1452                spin_lock_irqsave(&object->lock, flags);
1453                if ((object->flags & OBJECT_REPORTED) &&
1454                    unreferenced_object(object))
1455                        __paint_it(object, KMEMLEAK_GREY);
1456                spin_unlock_irqrestore(&object->lock, flags);
1457        }
1458        rcu_read_unlock();
1459}
1460
1461/*
1462 * File write operation to configure kmemleak at run-time. The following
1463 * commands can be written to the /sys/kernel/debug/kmemleak file:
1464 *   off        - disable kmemleak (irreversible)
1465 *   stack=on   - enable the task stacks scanning
1466 *   stack=off  - disable the task stacks scanning
1467 *   scan=on    - start the automatic memory scanning thread
1468 *   scan=off   - stop the automatic memory scanning thread
1469 *   scan=...   - set the automatic memory scanning period in seconds (0 to
1470 *                disable it)
1471 *   scan       - trigger a memory scan
1472 *   clear      - mark all current reported unreferenced kmemleak objects as
1473 *                grey to ignore printing them
1474 *   dump=...   - dump information about the object found at the given address
1475 */
1476static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1477                              size_t size, loff_t *ppos)
1478{
1479        char buf[64];
1480        int buf_size;
1481        int ret;
1482
1483        buf_size = min(size, (sizeof(buf) - 1));
1484        if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1485                return -EFAULT;
1486        buf[buf_size] = 0;
1487
1488        ret = mutex_lock_interruptible(&scan_mutex);
1489        if (ret < 0)
1490                return ret;
1491
1492        if (strncmp(buf, "off", 3) == 0)
1493                kmemleak_disable();
1494        else if (strncmp(buf, "stack=on", 8) == 0)
1495                kmemleak_stack_scan = 1;
1496        else if (strncmp(buf, "stack=off", 9) == 0)
1497                kmemleak_stack_scan = 0;
1498        else if (strncmp(buf, "scan=on", 7) == 0)
1499                start_scan_thread();
1500        else if (strncmp(buf, "scan=off", 8) == 0)
1501                stop_scan_thread();
1502        else if (strncmp(buf, "scan=", 5) == 0) {
1503                unsigned long secs;
1504
1505                ret = strict_strtoul(buf + 5, 0, &secs);
1506                if (ret < 0)
1507                        goto out;
1508                stop_scan_thread();
1509                if (secs) {
1510                        jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1511                        start_scan_thread();
1512                }
1513        } else if (strncmp(buf, "scan", 4) == 0)
1514                kmemleak_scan();
1515        else if (strncmp(buf, "clear", 5) == 0)
1516                kmemleak_clear();
1517        else if (strncmp(buf, "dump=", 5) == 0)
1518                ret = dump_str_object_info(buf + 5);
1519        else
1520                ret = -EINVAL;
1521
1522out:
1523        mutex_unlock(&scan_mutex);
1524        if (ret < 0)
1525                return ret;
1526
1527        /* ignore the rest of the buffer, only one command at a time */
1528        *ppos += size;
1529        return size;
1530}
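
/*
 * Illustration only, not part of kmemleak itself: a minimal user-space sketch
 * of driving the command interface implemented by kmemleak_write() above and
 * reading the reports produced through the seq_file operations. It assumes
 * debugfs is mounted at /sys/kernel/debug, the kernel was built with
 * CONFIG_DEBUG_KMEMLEAK and the program is run as root:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f;
 *		int fd;
 *
 *		fd = open("/sys/kernel/debug/kmemleak", O_WRONLY);
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "scan", 4);	// trigger an immediate scan
 *		close(fd);
 *
 *		f = fopen("/sys/kernel/debug/kmemleak", "r");
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	// unreferenced object reports
 *		fclose(f);
 *		return 0;
 *	}
 */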
1531
1532static const struct file_operations kmemleak_fops = {
1533        .owner          = THIS_MODULE,
1534        .open           = kmemleak_open,
1535        .read           = seq_read,
1536        .write          = kmemleak_write,
1537        .llseek         = seq_lseek,
1538        .release        = kmemleak_release,
1539};
1540
1541/*
1542 * Perform the freeing of the kmemleak internal objects after waiting for any
1543 * current memory scan to complete.
1544 */
1545static void kmemleak_do_cleanup(struct work_struct *work)
1546{
1547        struct kmemleak_object *object;
1548
1549        mutex_lock(&scan_mutex);
1550        stop_scan_thread();
1551
1552        rcu_read_lock();
1553        list_for_each_entry_rcu(object, &object_list, object_list)
1554                delete_object_full(object->pointer);
1555        rcu_read_unlock();
1556        mutex_unlock(&scan_mutex);
1557}
1558
1559static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1560
1561/*
1562 * Disable kmemleak. No memory allocation/freeing will be traced once this
1563 * function is called. Disabling kmemleak is an irreversible operation.
1564 */
1565static void kmemleak_disable(void)
1566{
1567        /* atomically check whether it was already invoked */
1568        if (atomic_cmpxchg(&kmemleak_error, 0, 1))
1569                return;
1570
1571        /* stop any memory operation tracing */
1572        atomic_set(&kmemleak_early_log, 0);
1573        atomic_set(&kmemleak_enabled, 0);
1574
1575        /* check whether it is too early for a kernel thread */
1576        if (atomic_read(&kmemleak_initialized))
1577                schedule_work(&cleanup_work);
1578
1579        pr_info("Kernel memory leak detector disabled\n");
1580}
1581
1582/*
1583 * Allow boot-time kmemleak disabling (enabled by default).
1584 */
1585static int kmemleak_boot_config(char *str)
1586{
1587        if (!str)
1588                return -EINVAL;
1589        if (strcmp(str, "off") == 0)
1590                kmemleak_disable();
1591        else if (strcmp(str, "on") != 0)
1592                return -EINVAL;
1593        return 0;
1594}
1595early_param("kmemleak", kmemleak_boot_config);
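
/*
 * For example, booting with "kmemleak=off" on the kernel command line makes
 * kmemleak_boot_config() call kmemleak_disable() before kmemleak_init() runs;
 * "kmemleak=on" keeps the default behaviour and any other value is rejected
 * with -EINVAL.
 */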
1596
1597/*
1598 * Kmemleak initialization.
1599 */
1600void __init kmemleak_init(void)
1601{
1602        int i;
1603        unsigned long flags;
1604
1605        jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1606        jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1607
1608        object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1609        scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1610        INIT_PRIO_TREE_ROOT(&object_tree_root);
1611
1612        /* the kernel is still in UP mode, so disabling the IRQs is enough */
1613        local_irq_save(flags);
1614        if (!atomic_read(&kmemleak_error)) {
1615                atomic_set(&kmemleak_enabled, 1);
1616                atomic_set(&kmemleak_early_log, 0);
1617        }
1618        local_irq_restore(flags);
1619
1620        /*
1621         * This is the point where tracking allocations is safe. Automatic
1622         * scanning is started during the late initcall. Add the early logged
1623         * callbacks to the kmemleak infrastructure.
1624         */
1625        for (i = 0; i < crt_early_log; i++) {
1626                struct early_log *log = &early_log[i];
1627
1628                switch (log->op_type) {
1629                case KMEMLEAK_ALLOC:
1630                        early_alloc(log);
1631                        break;
1632                case KMEMLEAK_FREE:
1633                        kmemleak_free(log->ptr);
1634                        break;
1635                case KMEMLEAK_FREE_PART:
1636                        kmemleak_free_part(log->ptr, log->size);
1637                        break;
1638                case KMEMLEAK_NOT_LEAK:
1639                        kmemleak_not_leak(log->ptr);
1640                        break;
1641                case KMEMLEAK_IGNORE:
1642                        kmemleak_ignore(log->ptr);
1643                        break;
1644                case KMEMLEAK_SCAN_AREA:
1645                        kmemleak_scan_area(log->ptr, log->offset, log->length,
1646                                           GFP_KERNEL);
1647                        break;
1648                case KMEMLEAK_NO_SCAN:
1649                        kmemleak_no_scan(log->ptr);
1650                        break;
1651                default:
1652                        WARN_ON(1);
1653                }
1654        }
1655}
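
/*
 * Illustration only, not part of kmemleak itself: a hedged sketch of the
 * annotation calls that the early-log replay above dispatches to; these are
 * the same functions a driver would call directly once kmemleak is enabled.
 * The driver function and buffer below are hypothetical:
 *
 *	#include <linux/slab.h>
 *	#include <linux/kmemleak.h>
 *
 *	static void *example_buf;
 *
 *	static int example_driver_init(void)
 *	{
 *		example_buf = kmalloc(128, GFP_KERNEL);
 *		if (!example_buf)
 *			return -ENOMEM;
 *
 *		// the only reference is handed off to hardware, so tell
 *		// kmemleak not to report this object as a leak
 *		kmemleak_not_leak(example_buf);
 *
 *		// the buffer holds no kernel pointers, so its contents need
 *		// not be scanned
 *		kmemleak_no_scan(example_buf);
 *
 *		return 0;
 *	}
 */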
1656
1657/*
1658 * Late initialization function.
1659 */
1660static int __init kmemleak_late_init(void)
1661{
1662        struct dentry *dentry;
1663
1664        atomic_set(&kmemleak_initialized, 1);
1665
1666        if (atomic_read(&kmemleak_error)) {
1667                /*
1668                 * Some error occurred and kmemleak was disabled. There is a
1669                 * small chance that kmemleak_disable() was called immediately
1670                 * after setting kmemleak_initialized and we may end up with
1671                 * two clean-up threads, but they are serialized by scan_mutex.
1672                 */
1673                schedule_work(&cleanup_work);
1674                return -ENOMEM;
1675        }
1676
1677        dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
1678                                     &kmemleak_fops);
1679        if (!dentry)
1680                pr_warning("Failed to create the debugfs kmemleak file\n");
1681        mutex_lock(&scan_mutex);
1682        start_scan_thread();
1683        mutex_unlock(&scan_mutex);
1684
1685        pr_info("Kernel memory leak detector initialized\n");
1686
1687        return 0;
1688}
1689late_initcall(kmemleak_late_init);
1690