linux/mm/slab.c
   1/*
   2 * linux/mm/slab.c
   3 * Written by Mark Hemment, 1996/97.
   4 * (markhe@nextd.demon.co.uk)
   5 *
   6 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
   7 *
   8 * Major cleanup, different bufctl logic, per-cpu arrays
   9 *      (c) 2000 Manfred Spraul
  10 *
  11 * Cleanup, make the head arrays unconditional, preparation for NUMA
  12 *      (c) 2002 Manfred Spraul
  13 *
   14 * An implementation of the Slab Allocator as described in outline in:
  15 *      UNIX Internals: The New Frontiers by Uresh Vahalia
  16 *      Pub: Prentice Hall      ISBN 0-13-101908-2
   17 * or with a little more detail in:
  18 *      The Slab Allocator: An Object-Caching Kernel Memory Allocator
  19 *      Jeff Bonwick (Sun Microsystems).
  20 *      Presented at: USENIX Summer 1994 Technical Conference
  21 *
  22 * The memory is organized in caches, one cache for each object type.
  23 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
   24 * Each cache consists of many slabs (they are small (usually one
  25 * page long) and always contiguous), and each slab contains multiple
  26 * initialized objects.
  27 *
   28 * This means that your constructor is used only for newly allocated
  29 * slabs and you must pass objects with the same initializations to
  30 * kmem_cache_free.
  31 *
  32 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
   33 * normal). If you need a special memory type, then you must create a new
  34 * cache for that memory type.
  35 *
  36 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
  37 *   full slabs with 0 free objects
  38 *   partial slabs
  39 *   empty slabs with no allocated objects
  40 *
  41 * If partial slabs exist, then new allocations come from these slabs,
  42 * otherwise from empty slabs or new slabs are allocated.
  43 *
  44 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
  45 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
  46 *
   47 * Each cache has a short per-cpu head array; most allocs
  48 * and frees go into that array, and if that array overflows, then 1/2
  49 * of the entries in the array are given back into the global cache.
  50 * The head array is strictly LIFO and should improve the cache hit rates.
  51 * On SMP, it additionally reduces the spinlock operations.
  52 *
   53 * The c_cpuarray may not be read with local interrupts enabled -
  54 * it's changed with a smp_call_function().
  55 *
  56 * SMP synchronization:
  57 *  constructors and destructors are called without any locking.
  58 *  Several members in struct kmem_cache and struct slab never change, they
  59 *      are accessed without any locking.
  60 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
  61 *      and local interrupts are disabled so slab code is preempt-safe.
  62 *  The non-constant members are protected with a per-cache irq spinlock.
  63 *
  64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
  65 * in 2000 - many ideas in the current implementation are derived from
  66 * his patch.
  67 *
  68 * Further notes from the original documentation:
  69 *
  70 * 11 April '97.  Started multi-threading - markhe
  71 *      The global cache-chain is protected by the mutex 'slab_mutex'.
   72 *      The mutex is only needed when accessing/extending the cache-chain, which
  73 *      can never happen inside an interrupt (kmem_cache_create(),
  74 *      kmem_cache_shrink() and kmem_cache_reap()).
  75 *
  76 *      At present, each engine can be growing a cache.  This should be blocked.
  77 *
  78 * 15 March 2005. NUMA slab allocator.
  79 *      Shai Fultheim <shai@scalex86.org>.
  80 *      Shobhit Dayal <shobhit@calsoftinc.com>
  81 *      Alok N Kataria <alokk@calsoftinc.com>
  82 *      Christoph Lameter <christoph@lameter.com>
  83 *
  84 *      Modified the slab allocator to be node aware on NUMA systems.
  85 *      Each node has its own list of partial, free and full slabs.
  86 *      All object allocations for a node occur from node specific slab lists.
  87 */
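
/*
 * Illustrative usage sketch (not part of this file; "struct foo",
 * foo_ctor() and foo_cachep are hypothetical): a subsystem creates its
 * own cache once, then allocates and frees objects from it.  Because the
 * constructor only runs when a new slab is populated, objects must be
 * handed back to kmem_cache_free() in their initialized state, as the
 * comment above explains.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	static void foo_ctor(void *obj)
 *	{
 *		struct foo *f = obj;
 *
 *		spin_lock_init(&f->lock);
 *	}
 *
 *	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
 *				       0, SLAB_HWCACHE_ALIGN, foo_ctor);
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 */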
  88
  89#include        <linux/slab.h>
  90#include        <linux/mm.h>
  91#include        <linux/poison.h>
  92#include        <linux/swap.h>
  93#include        <linux/cache.h>
  94#include        <linux/interrupt.h>
  95#include        <linux/init.h>
  96#include        <linux/compiler.h>
  97#include        <linux/cpuset.h>
  98#include        <linux/proc_fs.h>
  99#include        <linux/seq_file.h>
 100#include        <linux/notifier.h>
 101#include        <linux/kallsyms.h>
 102#include        <linux/cpu.h>
 103#include        <linux/sysctl.h>
 104#include        <linux/module.h>
 105#include        <linux/rcupdate.h>
 106#include        <linux/string.h>
 107#include        <linux/uaccess.h>
 108#include        <linux/nodemask.h>
 109#include        <linux/kmemleak.h>
 110#include        <linux/mempolicy.h>
 111#include        <linux/mutex.h>
 112#include        <linux/fault-inject.h>
 113#include        <linux/rtmutex.h>
 114#include        <linux/reciprocal_div.h>
 115#include        <linux/debugobjects.h>
 116#include        <linux/kmemcheck.h>
 117#include        <linux/memory.h>
 118#include        <linux/prefetch.h>
 119
 120#include        <net/sock.h>
 121
 122#include        <asm/cacheflush.h>
 123#include        <asm/tlbflush.h>
 124#include        <asm/page.h>
 125
 126#include <trace/events/kmem.h>
 127
 128#include        "internal.h"
 129
 130#include        "slab.h"
 131
 132/*
  133 * DEBUG        - 1 for kmem_cache_create() to honour: SLAB_RED_ZONE & SLAB_POISON.
 134 *                0 for faster, smaller code (especially in the critical paths).
 135 *
 136 * STATS        - 1 to collect stats for /proc/slabinfo.
 137 *                0 for faster, smaller code (especially in the critical paths).
 138 *
 139 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 140 */
 141
 142#ifdef CONFIG_DEBUG_SLAB
 143#define DEBUG           1
 144#define STATS           1
 145#define FORCED_DEBUG    1
 146#else
 147#define DEBUG           0
 148#define STATS           0
 149#define FORCED_DEBUG    0
 150#endif
 151
 152/* Shouldn't this be in a header file somewhere? */
 153#define BYTES_PER_WORD          sizeof(void *)
 154#define REDZONE_ALIGN           max(BYTES_PER_WORD, __alignof__(unsigned long long))
 155
 156#ifndef ARCH_KMALLOC_FLAGS
 157#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 158#endif
 159
 160#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
 161                                <= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
 162
 163#if FREELIST_BYTE_INDEX
 164typedef unsigned char freelist_idx_t;
 165#else
 166typedef unsigned short freelist_idx_t;
 167#endif
 168
 169#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
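
/*
 * Worked example (4KB pages and SLAB_OBJ_MIN_SIZE == 16 are assumed here
 * for illustration): PAGE_SIZE >> BITS_PER_BYTE is 4096 / 256 = 16, so
 * the test above is true and a one-byte freelist index is chosen.  A
 * single page then holds at most 4096 / 16 = 256 objects, and
 * SLAB_OBJ_MAX_NUM (255 in that case) caps the per-slab object count
 * elsewhere in this file, so the index type cannot overflow.  With
 * larger pages an unsigned short index is used instead.
 */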
 170
 171/*
 172 * true if a page was allocated from pfmemalloc reserves for network-based
 173 * swap
 174 */
 175static bool pfmemalloc_active __read_mostly;
 176
 177/*
 178 * struct array_cache
 179 *
 180 * Purpose:
 181 * - LIFO ordering, to hand out cache-warm objects from _alloc
 182 * - reduce the number of linked list operations
 183 * - reduce spinlock operations
 184 *
 185 * The limit is stored in the per-cpu structure to reduce the data cache
 186 * footprint.
 187 *
 188 */
 189struct array_cache {
 190        unsigned int avail;
 191        unsigned int limit;
 192        unsigned int batchcount;
 193        unsigned int touched;
 194        void *entry[];  /*
 195                         * Must have this definition in here for the proper
 196                         * alignment of array_cache. Also simplifies accessing
 197                         * the entries.
 198                         *
 199                         * Entries should not be directly dereferenced as
 200                         * entries belonging to slabs marked pfmemalloc will
 201                         * have the lower bits set SLAB_OBJ_PFMEMALLOC
 202                         */
 203};
 204
 205struct alien_cache {
 206        spinlock_t lock;
 207        struct array_cache ac;
 208};
 209
 210#define SLAB_OBJ_PFMEMALLOC     1
 211static inline bool is_obj_pfmemalloc(void *objp)
 212{
 213        return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
 214}
 215
 216static inline void set_obj_pfmemalloc(void **objp)
 217{
 218        *objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
 219        return;
 220}
 221
 222static inline void clear_obj_pfmemalloc(void **objp)
 223{
 224        *objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
 225}
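
/*
 * The tagging above works because slab objects are at least word
 * aligned, so bit 0 of an object pointer is normally clear and can
 * carry the PFMEMALLOC marker while the pointer sits in an array_cache.
 * Sketch of the round trip:
 *
 *	void *objp = obj;			(bit 0 clear)
 *	set_obj_pfmemalloc(&objp);		(objp | SLAB_OBJ_PFMEMALLOC)
 *	is_obj_pfmemalloc(objp);		(true)
 *	clear_obj_pfmemalloc(&objp);		(original pointer restored)
 */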
 226
 227/*
 228 * bootstrap: The caches do not work without cpuarrays anymore, but the
 229 * cpuarrays are allocated from the generic caches...
 230 */
 231#define BOOT_CPUCACHE_ENTRIES   1
 232struct arraycache_init {
 233        struct array_cache cache;
 234        void *entries[BOOT_CPUCACHE_ENTRIES];
 235};
 236
 237/*
 238 * Need this for bootstrapping a per node allocator.
 239 */
 240#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
 241static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
 242#define CACHE_CACHE 0
 243#define SIZE_NODE (MAX_NUMNODES)
 244
 245static int drain_freelist(struct kmem_cache *cache,
 246                        struct kmem_cache_node *n, int tofree);
 247static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 248                        int node, struct list_head *list);
 249static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
 250static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 251static void cache_reap(struct work_struct *unused);
 252
 253static int slab_early_init = 1;
 254
 255#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
 256
 257static void kmem_cache_node_init(struct kmem_cache_node *parent)
 258{
 259        INIT_LIST_HEAD(&parent->slabs_full);
 260        INIT_LIST_HEAD(&parent->slabs_partial);
 261        INIT_LIST_HEAD(&parent->slabs_free);
 262        parent->shared = NULL;
 263        parent->alien = NULL;
 264        parent->colour_next = 0;
 265        spin_lock_init(&parent->list_lock);
 266        parent->free_objects = 0;
 267        parent->free_touched = 0;
 268}
 269
 270#define MAKE_LIST(cachep, listp, slab, nodeid)                          \
 271        do {                                                            \
 272                INIT_LIST_HEAD(listp);                                  \
 273                list_splice(&get_node(cachep, nodeid)->slab, listp);    \
 274        } while (0)
 275
 276#define MAKE_ALL_LISTS(cachep, ptr, nodeid)                             \
 277        do {                                                            \
 278        MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);  \
 279        MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
 280        MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);  \
 281        } while (0)
 282
 283#define CFLGS_OFF_SLAB          (0x80000000UL)
 284#define OFF_SLAB(x)     ((x)->flags & CFLGS_OFF_SLAB)
 285
 286#define BATCHREFILL_LIMIT       16
 287/*
  288 * Optimization question: fewer reaps means a lower probability of unnecessary
 289 * cpucache drain/refill cycles.
 290 *
 291 * OTOH the cpuarrays can contain lots of objects,
 292 * which could lock up otherwise freeable slabs.
 293 */
 294#define REAPTIMEOUT_AC          (2*HZ)
 295#define REAPTIMEOUT_NODE        (4*HZ)
 296
 297#if STATS
 298#define STATS_INC_ACTIVE(x)     ((x)->num_active++)
 299#define STATS_DEC_ACTIVE(x)     ((x)->num_active--)
 300#define STATS_INC_ALLOCED(x)    ((x)->num_allocations++)
 301#define STATS_INC_GROWN(x)      ((x)->grown++)
 302#define STATS_ADD_REAPED(x,y)   ((x)->reaped += (y))
 303#define STATS_SET_HIGH(x)                                               \
 304        do {                                                            \
 305                if ((x)->num_active > (x)->high_mark)                   \
 306                        (x)->high_mark = (x)->num_active;               \
 307        } while (0)
 308#define STATS_INC_ERR(x)        ((x)->errors++)
 309#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
 310#define STATS_INC_NODEFREES(x)  ((x)->node_frees++)
 311#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
 312#define STATS_SET_FREEABLE(x, i)                                        \
 313        do {                                                            \
 314                if ((x)->max_freeable < i)                              \
 315                        (x)->max_freeable = i;                          \
 316        } while (0)
 317#define STATS_INC_ALLOCHIT(x)   atomic_inc(&(x)->allochit)
 318#define STATS_INC_ALLOCMISS(x)  atomic_inc(&(x)->allocmiss)
 319#define STATS_INC_FREEHIT(x)    atomic_inc(&(x)->freehit)
 320#define STATS_INC_FREEMISS(x)   atomic_inc(&(x)->freemiss)
 321#else
 322#define STATS_INC_ACTIVE(x)     do { } while (0)
 323#define STATS_DEC_ACTIVE(x)     do { } while (0)
 324#define STATS_INC_ALLOCED(x)    do { } while (0)
 325#define STATS_INC_GROWN(x)      do { } while (0)
 326#define STATS_ADD_REAPED(x,y)   do { (void)(y); } while (0)
 327#define STATS_SET_HIGH(x)       do { } while (0)
 328#define STATS_INC_ERR(x)        do { } while (0)
 329#define STATS_INC_NODEALLOCS(x) do { } while (0)
 330#define STATS_INC_NODEFREES(x)  do { } while (0)
 331#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
 332#define STATS_SET_FREEABLE(x, i) do { } while (0)
 333#define STATS_INC_ALLOCHIT(x)   do { } while (0)
 334#define STATS_INC_ALLOCMISS(x)  do { } while (0)
 335#define STATS_INC_FREEHIT(x)    do { } while (0)
 336#define STATS_INC_FREEMISS(x)   do { } while (0)
 337#endif
 338
 339#if DEBUG
 340
 341/*
 342 * memory layout of objects:
 343 * 0            : objp
 344 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 345 *              the end of an object is aligned with the end of the real
 346 *              allocation. Catches writes behind the end of the allocation.
 347 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 348 *              redzone word.
 349 * cachep->obj_offset: The real object.
 350 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 351 * cachep->size - 1* BYTES_PER_WORD: last caller address
 352 *                                      [BYTES_PER_WORD long]
 353 */
 354static int obj_offset(struct kmem_cache *cachep)
 355{
 356        return cachep->obj_offset;
 357}
 358
 359static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 360{
 361        BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 362        return (unsigned long long*) (objp + obj_offset(cachep) -
 363                                      sizeof(unsigned long long));
 364}
 365
 366static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 367{
 368        BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 369        if (cachep->flags & SLAB_STORE_USER)
 370                return (unsigned long long *)(objp + cachep->size -
 371                                              sizeof(unsigned long long) -
 372                                              REDZONE_ALIGN);
 373        return (unsigned long long *) (objp + cachep->size -
 374                                       sizeof(unsigned long long));
 375}
 376
 377static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 378{
 379        BUG_ON(!(cachep->flags & SLAB_STORE_USER));
 380        return (void **)(objp + cachep->size - BYTES_PER_WORD);
 381}
 382
 383#else
 384
 385#define obj_offset(x)                   0
 386#define dbg_redzone1(cachep, objp)      ({BUG(); (unsigned long long *)NULL;})
 387#define dbg_redzone2(cachep, objp)      ({BUG(); (unsigned long long *)NULL;})
 388#define dbg_userword(cachep, objp)      ({BUG(); (void **)NULL;})
 389
 390#endif
 391
 392#define OBJECT_FREE (0)
 393#define OBJECT_ACTIVE (1)
 394
 395#ifdef CONFIG_DEBUG_SLAB_LEAK
 396
 397static void set_obj_status(struct page *page, int idx, int val)
 398{
 399        int freelist_size;
 400        char *status;
 401        struct kmem_cache *cachep = page->slab_cache;
 402
 403        freelist_size = cachep->num * sizeof(freelist_idx_t);
 404        status = (char *)page->freelist + freelist_size;
 405        status[idx] = val;
 406}
 407
 408static inline unsigned int get_obj_status(struct page *page, int idx)
 409{
 410        int freelist_size;
 411        char *status;
 412        struct kmem_cache *cachep = page->slab_cache;
 413
 414        freelist_size = cachep->num * sizeof(freelist_idx_t);
 415        status = (char *)page->freelist + freelist_size;
 416
 417        return status[idx];
 418}
 419
 420#else
 421static inline void set_obj_status(struct page *page, int idx, int val) {}
 422
 423#endif
 424
 425/*
 426 * Do not go above this order unless 0 objects fit into the slab or
 427 * overridden on the command line.
 428 */
 429#define SLAB_MAX_ORDER_HI       1
 430#define SLAB_MAX_ORDER_LO       0
 431static int slab_max_order = SLAB_MAX_ORDER_LO;
 432static bool slab_max_order_set __initdata;
 433
 434static inline struct kmem_cache *virt_to_cache(const void *obj)
 435{
 436        struct page *page = virt_to_head_page(obj);
 437        return page->slab_cache;
 438}
 439
 440static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
 441                                 unsigned int idx)
 442{
 443        return page->s_mem + cache->size * idx;
 444}
 445
 446/*
  447 * We want to avoid an expensive divide: (offset / cache->size)
 448 *   Using the fact that size is a constant for a particular cache,
 449 *   we can replace (offset / cache->size) by
 450 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 451 */
 452static inline unsigned int obj_to_index(const struct kmem_cache *cache,
 453                                        const struct page *page, void *obj)
 454{
 455        u32 offset = (obj - page->s_mem);
 456        return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 457}
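
/*
 * Sketch: the reciprocal is precomputed once per cache when its object
 * size is settled, roughly
 *
 *	cachep->reciprocal_buffer_size = reciprocal_value(cachep->size);
 *
 * (see linux/reciprocal_div.h), after which obj_to_index() does the
 * division as a multiply and shift: reciprocal_divide(offset, rv)
 * returns the same value as offset / cachep->size for the offsets that
 * occur here.
 */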
 458
 459/* internal cache of cache description objs */
 460static struct kmem_cache kmem_cache_boot = {
 461        .batchcount = 1,
 462        .limit = BOOT_CPUCACHE_ENTRIES,
 463        .shared = 1,
 464        .size = sizeof(struct kmem_cache),
 465        .name = "kmem_cache",
 466};
 467
 468#define BAD_ALIEN_MAGIC 0x01020304ul
 469
 470static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 471
 472static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 473{
 474        return this_cpu_ptr(cachep->cpu_cache);
 475}
 476
 477static size_t calculate_freelist_size(int nr_objs, size_t align)
 478{
 479        size_t freelist_size;
 480
 481        freelist_size = nr_objs * sizeof(freelist_idx_t);
 482        if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
 483                freelist_size += nr_objs * sizeof(char);
 484
 485        if (align)
 486                freelist_size = ALIGN(freelist_size, align);
 487
 488        return freelist_size;
 489}
 490
 491static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
 492                                size_t idx_size, size_t align)
 493{
 494        int nr_objs;
 495        size_t remained_size;
 496        size_t freelist_size;
 497        int extra_space = 0;
 498
 499        if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
 500                extra_space = sizeof(char);
 501        /*
 502         * Ignore padding for the initial guess. The padding
 503         * is at most @align-1 bytes, and @buffer_size is at
 504         * least @align. In the worst case, this result will
 505         * be one greater than the number of objects that fit
 506         * into the memory allocation when taking the padding
 507         * into account.
 508         */
 509        nr_objs = slab_size / (buffer_size + idx_size + extra_space);
 510
 511        /*
 512         * This calculated number will be either the right
 513         * amount, or one greater than what we want.
 514         */
 515        remained_size = slab_size - nr_objs * buffer_size;
 516        freelist_size = calculate_freelist_size(nr_objs, align);
 517        if (remained_size < freelist_size)
 518                nr_objs--;
 519
 520        return nr_objs;
 521}
 522
 523/*
 524 * Calculate the number of objects and left-over bytes for a given buffer size.
 525 */
 526static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 527                           size_t align, int flags, size_t *left_over,
 528                           unsigned int *num)
 529{
 530        int nr_objs;
 531        size_t mgmt_size;
 532        size_t slab_size = PAGE_SIZE << gfporder;
 533
 534        /*
 535         * The slab management structure can be either off the slab or
 536         * on it. For the latter case, the memory allocated for a
 537         * slab is used for:
 538         *
 539         * - One unsigned int for each object
 540         * - Padding to respect alignment of @align
 541         * - @buffer_size bytes for each object
 542         *
 543         * If the slab management structure is off the slab, then the
 544         * alignment will already be calculated into the size. Because
 545         * the slabs are all pages aligned, the objects will be at the
 546         * correct alignment when allocated.
 547         */
 548        if (flags & CFLGS_OFF_SLAB) {
 549                mgmt_size = 0;
 550                nr_objs = slab_size / buffer_size;
 551
 552        } else {
 553                nr_objs = calculate_nr_objs(slab_size, buffer_size,
 554                                        sizeof(freelist_idx_t), align);
 555                mgmt_size = calculate_freelist_size(nr_objs, align);
 556        }
 557        *num = nr_objs;
 558        *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 559}
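
/*
 * Worked example with assumed numbers (on-slab management, no
 * CONFIG_DEBUG_SLAB_LEAK byte per object): for a 4KB slab (gfporder 0),
 * buffer_size 256, a one-byte freelist_idx_t and align 8, the initial
 * guess is 4096 / (256 + 1) = 15 objects.  The remaining
 * 4096 - 15 * 256 = 256 bytes easily hold the ALIGN(15, 8) = 16 byte
 * freelist, so *num = 15 and *left_over = 256 - 16 = 240 bytes, later
 * usable for cache colouring.
 */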
 560
 561#if DEBUG
 562#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 563
 564static void __slab_error(const char *function, struct kmem_cache *cachep,
 565                        char *msg)
 566{
 567        printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
 568               function, cachep->name, msg);
 569        dump_stack();
 570        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 571}
 572#endif
 573
 574/*
 575 * By default on NUMA we use alien caches to stage the freeing of
 576 * objects allocated from other nodes. This causes massive memory
  577 * inefficiencies when using a fake NUMA setup to split memory into a
  578 * large number of small nodes, so it can be disabled on the command
  579 * line.
  580 */
 581
 582static int use_alien_caches __read_mostly = 1;
 583static int __init noaliencache_setup(char *s)
 584{
 585        use_alien_caches = 0;
 586        return 1;
 587}
 588__setup("noaliencache", noaliencache_setup);
 589
 590static int __init slab_max_order_setup(char *str)
 591{
 592        get_option(&str, &slab_max_order);
 593        slab_max_order = slab_max_order < 0 ? 0 :
 594                                min(slab_max_order, MAX_ORDER - 1);
 595        slab_max_order_set = true;
 596
 597        return 1;
 598}
 599__setup("slab_max_order=", slab_max_order_setup);
 600
 601#ifdef CONFIG_NUMA
 602/*
 603 * Special reaping functions for NUMA systems called from cache_reap().
 604 * These take care of doing round robin flushing of alien caches (containing
 605 * objects freed on different nodes from which they were allocated) and the
 606 * flushing of remote pcps by calling drain_node_pages.
 607 */
 608static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 609
 610static void init_reap_node(int cpu)
 611{
 612        int node;
 613
 614        node = next_node(cpu_to_mem(cpu), node_online_map);
 615        if (node == MAX_NUMNODES)
 616                node = first_node(node_online_map);
 617
 618        per_cpu(slab_reap_node, cpu) = node;
 619}
 620
 621static void next_reap_node(void)
 622{
 623        int node = __this_cpu_read(slab_reap_node);
 624
 625        node = next_node(node, node_online_map);
 626        if (unlikely(node >= MAX_NUMNODES))
 627                node = first_node(node_online_map);
 628        __this_cpu_write(slab_reap_node, node);
 629}
 630
 631#else
 632#define init_reap_node(cpu) do { } while (0)
 633#define next_reap_node(void) do { } while (0)
 634#endif
 635
 636/*
 637 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 638 * via the workqueue/eventd.
 639 * Add the CPU number into the expiration time to minimize the possibility of
 640 * the CPUs getting into lockstep and contending for the global cache chain
 641 * lock.
 642 */
 643static void start_cpu_timer(int cpu)
 644{
 645        struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 646
 647        /*
 648         * When this gets called from do_initcalls via cpucache_init(),
 649         * init_workqueues() has already run, so keventd will be setup
 650         * at that time.
 651         */
 652        if (keventd_up() && reap_work->work.func == NULL) {
 653                init_reap_node(cpu);
 654                INIT_DEFERRABLE_WORK(reap_work, cache_reap);
 655                schedule_delayed_work_on(cpu, reap_work,
 656                                        __round_jiffies_relative(HZ, cpu));
 657        }
 658}
 659
 660static void init_arraycache(struct array_cache *ac, int limit, int batch)
 661{
 662        /*
  663         * The array_cache structures contain pointers to free objects.
 664         * However, when such objects are allocated or transferred to another
 665         * cache the pointers are not cleared and they could be counted as
 666         * valid references during a kmemleak scan. Therefore, kmemleak must
 667         * not scan such objects.
 668         */
 669        kmemleak_no_scan(ac);
 670        if (ac) {
 671                ac->avail = 0;
 672                ac->limit = limit;
 673                ac->batchcount = batch;
 674                ac->touched = 0;
 675        }
 676}
 677
 678static struct array_cache *alloc_arraycache(int node, int entries,
 679                                            int batchcount, gfp_t gfp)
 680{
 681        size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
 682        struct array_cache *ac = NULL;
 683
 684        ac = kmalloc_node(memsize, gfp, node);
 685        init_arraycache(ac, entries, batchcount);
 686        return ac;
 687}
 688
 689static inline bool is_slab_pfmemalloc(struct page *page)
 690{
 691        return PageSlabPfmemalloc(page);
 692}
 693
  694/* Clears pfmemalloc_active if no slabs have pfmemalloc set */
 695static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
 696                                                struct array_cache *ac)
 697{
 698        struct kmem_cache_node *n = get_node(cachep, numa_mem_id());
 699        struct page *page;
 700        unsigned long flags;
 701
 702        if (!pfmemalloc_active)
 703                return;
 704
 705        spin_lock_irqsave(&n->list_lock, flags);
 706        list_for_each_entry(page, &n->slabs_full, lru)
 707                if (is_slab_pfmemalloc(page))
 708                        goto out;
 709
 710        list_for_each_entry(page, &n->slabs_partial, lru)
 711                if (is_slab_pfmemalloc(page))
 712                        goto out;
 713
 714        list_for_each_entry(page, &n->slabs_free, lru)
 715                if (is_slab_pfmemalloc(page))
 716                        goto out;
 717
 718        pfmemalloc_active = false;
 719out:
 720        spin_unlock_irqrestore(&n->list_lock, flags);
 721}
 722
 723static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 724                                                gfp_t flags, bool force_refill)
 725{
 726        int i;
 727        void *objp = ac->entry[--ac->avail];
 728
 729        /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
 730        if (unlikely(is_obj_pfmemalloc(objp))) {
 731                struct kmem_cache_node *n;
 732
 733                if (gfp_pfmemalloc_allowed(flags)) {
 734                        clear_obj_pfmemalloc(&objp);
 735                        return objp;
 736                }
 737
 738                /* The caller cannot use PFMEMALLOC objects, find another one */
 739                for (i = 0; i < ac->avail; i++) {
 740                        /* If a !PFMEMALLOC object is found, swap them */
 741                        if (!is_obj_pfmemalloc(ac->entry[i])) {
 742                                objp = ac->entry[i];
 743                                ac->entry[i] = ac->entry[ac->avail];
 744                                ac->entry[ac->avail] = objp;
 745                                return objp;
 746                        }
 747                }
 748
 749                /*
 750                 * If there are empty slabs on the slabs_free list and we are
 751                 * being forced to refill the cache, mark this one !pfmemalloc.
 752                 */
 753                n = get_node(cachep, numa_mem_id());
 754                if (!list_empty(&n->slabs_free) && force_refill) {
 755                        struct page *page = virt_to_head_page(objp);
 756                        ClearPageSlabPfmemalloc(page);
 757                        clear_obj_pfmemalloc(&objp);
 758                        recheck_pfmemalloc_active(cachep, ac);
 759                        return objp;
 760                }
 761
 762                /* No !PFMEMALLOC objects available */
 763                ac->avail++;
 764                objp = NULL;
 765        }
 766
 767        return objp;
 768}
 769
 770static inline void *ac_get_obj(struct kmem_cache *cachep,
 771                        struct array_cache *ac, gfp_t flags, bool force_refill)
 772{
 773        void *objp;
 774
 775        if (unlikely(sk_memalloc_socks()))
 776                objp = __ac_get_obj(cachep, ac, flags, force_refill);
 777        else
 778                objp = ac->entry[--ac->avail];
 779
 780        return objp;
 781}
 782
 783static noinline void *__ac_put_obj(struct kmem_cache *cachep,
 784                        struct array_cache *ac, void *objp)
 785{
 786        if (unlikely(pfmemalloc_active)) {
 787                /* Some pfmemalloc slabs exist, check if this is one */
 788                struct page *page = virt_to_head_page(objp);
 789                if (PageSlabPfmemalloc(page))
 790                        set_obj_pfmemalloc(&objp);
 791        }
 792
 793        return objp;
 794}
 795
 796static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
 797                                                                void *objp)
 798{
 799        if (unlikely(sk_memalloc_socks()))
 800                objp = __ac_put_obj(cachep, ac, objp);
 801
 802        ac->entry[ac->avail++] = objp;
 803}
 804
 805/*
 806 * Transfer objects in one arraycache to another.
 807 * Locking must be handled by the caller.
 808 *
 809 * Return the number of entries transferred.
 810 */
 811static int transfer_objects(struct array_cache *to,
 812                struct array_cache *from, unsigned int max)
 813{
 814        /* Figure out how many entries to transfer */
 815        int nr = min3(from->avail, max, to->limit - to->avail);
 816
 817        if (!nr)
 818                return 0;
 819
  820        memcpy(to->entry + to->avail, from->entry + from->avail - nr,
  821                        sizeof(void *) * nr);
 822
 823        from->avail -= nr;
 824        to->avail += nr;
 825        return nr;
 826}
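
/*
 * Note that the memcpy() above takes the entries most recently added to
 * @from, i.e. the cache-warmest ones.  For example, with
 * from->avail == 10 and nr == 4, entries 6..9 of @from are appended at
 * to->entry[to->avail] and from->avail drops to 6.
 */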
 827
 828#ifndef CONFIG_NUMA
 829
 830#define drain_alien_cache(cachep, alien) do { } while (0)
 831#define reap_alien(cachep, n) do { } while (0)
 832
 833static inline struct alien_cache **alloc_alien_cache(int node,
 834                                                int limit, gfp_t gfp)
 835{
 836        return (struct alien_cache **)BAD_ALIEN_MAGIC;
 837}
 838
 839static inline void free_alien_cache(struct alien_cache **ac_ptr)
 840{
 841}
 842
 843static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 844{
 845        return 0;
 846}
 847
 848static inline void *alternate_node_alloc(struct kmem_cache *cachep,
 849                gfp_t flags)
 850{
 851        return NULL;
 852}
 853
 854static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 855                 gfp_t flags, int nodeid)
 856{
 857        return NULL;
 858}
 859
 860#else   /* CONFIG_NUMA */
 861
 862static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 863static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 864
 865static struct alien_cache *__alloc_alien_cache(int node, int entries,
 866                                                int batch, gfp_t gfp)
 867{
 868        size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
 869        struct alien_cache *alc = NULL;
 870
  871        alc = kmalloc_node(memsize, gfp, node);
             if (alc) {
  872                init_arraycache(&alc->ac, entries, batch);
  873                spin_lock_init(&alc->lock);
             }
  874        return alc;
  875}
 876
 877static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 878{
 879        struct alien_cache **alc_ptr;
 880        size_t memsize = sizeof(void *) * nr_node_ids;
 881        int i;
 882
 883        if (limit > 1)
 884                limit = 12;
 885        alc_ptr = kzalloc_node(memsize, gfp, node);
 886        if (!alc_ptr)
 887                return NULL;
 888
 889        for_each_node(i) {
 890                if (i == node || !node_online(i))
 891                        continue;
 892                alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
 893                if (!alc_ptr[i]) {
 894                        for (i--; i >= 0; i--)
 895                                kfree(alc_ptr[i]);
 896                        kfree(alc_ptr);
 897                        return NULL;
 898                }
 899        }
 900        return alc_ptr;
 901}
 902
 903static void free_alien_cache(struct alien_cache **alc_ptr)
 904{
 905        int i;
 906
 907        if (!alc_ptr)
 908                return;
 909        for_each_node(i)
 910            kfree(alc_ptr[i]);
 911        kfree(alc_ptr);
 912}
 913
 914static void __drain_alien_cache(struct kmem_cache *cachep,
 915                                struct array_cache *ac, int node,
 916                                struct list_head *list)
 917{
 918        struct kmem_cache_node *n = get_node(cachep, node);
 919
 920        if (ac->avail) {
 921                spin_lock(&n->list_lock);
 922                /*
  923                 * Stuff objects into the remote node's shared array first.
 924                 * That way we could avoid the overhead of putting the objects
 925                 * into the free lists and getting them back later.
 926                 */
 927                if (n->shared)
 928                        transfer_objects(n->shared, ac, ac->limit);
 929
 930                free_block(cachep, ac->entry, ac->avail, node, list);
 931                ac->avail = 0;
 932                spin_unlock(&n->list_lock);
 933        }
 934}
 935
 936/*
 937 * Called from cache_reap() to regularly drain alien caches round robin.
 938 */
 939static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 940{
 941        int node = __this_cpu_read(slab_reap_node);
 942
 943        if (n->alien) {
 944                struct alien_cache *alc = n->alien[node];
 945                struct array_cache *ac;
 946
 947                if (alc) {
 948                        ac = &alc->ac;
 949                        if (ac->avail && spin_trylock_irq(&alc->lock)) {
 950                                LIST_HEAD(list);
 951
 952                                __drain_alien_cache(cachep, ac, node, &list);
 953                                spin_unlock_irq(&alc->lock);
 954                                slabs_destroy(cachep, &list);
 955                        }
 956                }
 957        }
 958}
 959
 960static void drain_alien_cache(struct kmem_cache *cachep,
 961                                struct alien_cache **alien)
 962{
 963        int i = 0;
 964        struct alien_cache *alc;
 965        struct array_cache *ac;
 966        unsigned long flags;
 967
 968        for_each_online_node(i) {
 969                alc = alien[i];
 970                if (alc) {
 971                        LIST_HEAD(list);
 972
 973                        ac = &alc->ac;
 974                        spin_lock_irqsave(&alc->lock, flags);
 975                        __drain_alien_cache(cachep, ac, i, &list);
 976                        spin_unlock_irqrestore(&alc->lock, flags);
 977                        slabs_destroy(cachep, &list);
 978                }
 979        }
 980}
 981
 982static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
 983                                int node, int page_node)
 984{
 985        struct kmem_cache_node *n;
 986        struct alien_cache *alien = NULL;
 987        struct array_cache *ac;
 988        LIST_HEAD(list);
 989
 990        n = get_node(cachep, node);
 991        STATS_INC_NODEFREES(cachep);
 992        if (n->alien && n->alien[page_node]) {
 993                alien = n->alien[page_node];
 994                ac = &alien->ac;
 995                spin_lock(&alien->lock);
 996                if (unlikely(ac->avail == ac->limit)) {
 997                        STATS_INC_ACOVERFLOW(cachep);
 998                        __drain_alien_cache(cachep, ac, page_node, &list);
 999                }
1000                ac_put_obj(cachep, ac, objp);
1001                spin_unlock(&alien->lock);
1002                slabs_destroy(cachep, &list);
1003        } else {
1004                n = get_node(cachep, page_node);
1005                spin_lock(&n->list_lock);
1006                free_block(cachep, &objp, 1, page_node, &list);
1007                spin_unlock(&n->list_lock);
1008                slabs_destroy(cachep, &list);
1009        }
1010        return 1;
1011}
1012
1013static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1014{
1015        int page_node = page_to_nid(virt_to_page(objp));
1016        int node = numa_mem_id();
1017        /*
 1018         * Make sure we are not freeing an object from another node to the array
1019         * cache on this cpu.
1020         */
1021        if (likely(node == page_node))
1022                return 0;
1023
1024        return __cache_free_alien(cachep, objp, node, page_node);
1025}
1026#endif
1027
1028/*
 1029 * Allocates and initializes a kmem_cache_node for the given node on each
 1030 * slab cache, used for either memory or cpu hotplug.  If memory is being
 1031 * hot-added, the kmem_cache_node will be allocated off-node since memory
 1032 * is not yet online for the new node.  When hotplugging memory or a cpu,
 1033 * existing kmem_cache_nodes are not replaced if already in use.
1034 *
1035 * Must hold slab_mutex.
1036 */
1037static int init_cache_node_node(int node)
1038{
1039        struct kmem_cache *cachep;
1040        struct kmem_cache_node *n;
1041        const size_t memsize = sizeof(struct kmem_cache_node);
1042
1043        list_for_each_entry(cachep, &slab_caches, list) {
1044                /*
1045                 * Set up the kmem_cache_node for cpu before we can
1046                 * begin anything. Make sure some other cpu on this
1047                 * node has not already allocated this
1048                 */
1049                n = get_node(cachep, node);
1050                if (!n) {
1051                        n = kmalloc_node(memsize, GFP_KERNEL, node);
1052                        if (!n)
1053                                return -ENOMEM;
1054                        kmem_cache_node_init(n);
1055                        n->next_reap = jiffies + REAPTIMEOUT_NODE +
1056                            ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1057
1058                        /*
1059                         * The kmem_cache_nodes don't come and go as CPUs
1060                         * come and go.  slab_mutex is sufficient
1061                         * protection here.
1062                         */
1063                        cachep->node[node] = n;
1064                }
1065
1066                spin_lock_irq(&n->list_lock);
1067                n->free_limit =
1068                        (1 + nr_cpus_node(node)) *
1069                        cachep->batchcount + cachep->num;
1070                spin_unlock_irq(&n->list_lock);
1071        }
1072        return 0;
1073}
1074
1075static inline int slabs_tofree(struct kmem_cache *cachep,
1076                                                struct kmem_cache_node *n)
1077{
1078        return (n->free_objects + cachep->num - 1) / cachep->num;
1079}
1080
1081static void cpuup_canceled(long cpu)
1082{
1083        struct kmem_cache *cachep;
1084        struct kmem_cache_node *n = NULL;
1085        int node = cpu_to_mem(cpu);
1086        const struct cpumask *mask = cpumask_of_node(node);
1087
1088        list_for_each_entry(cachep, &slab_caches, list) {
1089                struct array_cache *nc;
1090                struct array_cache *shared;
1091                struct alien_cache **alien;
1092                LIST_HEAD(list);
1093
1094                n = get_node(cachep, node);
1095                if (!n)
1096                        continue;
1097
1098                spin_lock_irq(&n->list_lock);
1099
1100                /* Free limit for this kmem_cache_node */
1101                n->free_limit -= cachep->batchcount;
1102
1103                /* cpu is dead; no one can alloc from it. */
1104                nc = per_cpu_ptr(cachep->cpu_cache, cpu);
1105                if (nc) {
1106                        free_block(cachep, nc->entry, nc->avail, node, &list);
1107                        nc->avail = 0;
1108                }
1109
1110                if (!cpumask_empty(mask)) {
1111                        spin_unlock_irq(&n->list_lock);
1112                        goto free_slab;
1113                }
1114
1115                shared = n->shared;
1116                if (shared) {
1117                        free_block(cachep, shared->entry,
1118                                   shared->avail, node, &list);
1119                        n->shared = NULL;
1120                }
1121
1122                alien = n->alien;
1123                n->alien = NULL;
1124
1125                spin_unlock_irq(&n->list_lock);
1126
1127                kfree(shared);
1128                if (alien) {
1129                        drain_alien_cache(cachep, alien);
1130                        free_alien_cache(alien);
1131                }
1132
1133free_slab:
1134                slabs_destroy(cachep, &list);
1135        }
1136        /*
1137         * In the previous loop, all the objects were freed to
 1138         * the respective cache's slabs; now we can go ahead and
1139         * shrink each nodelist to its limit.
1140         */
1141        list_for_each_entry(cachep, &slab_caches, list) {
1142                n = get_node(cachep, node);
1143                if (!n)
1144                        continue;
1145                drain_freelist(cachep, n, slabs_tofree(cachep, n));
1146        }
1147}
1148
1149static int cpuup_prepare(long cpu)
1150{
1151        struct kmem_cache *cachep;
1152        struct kmem_cache_node *n = NULL;
1153        int node = cpu_to_mem(cpu);
1154        int err;
1155
1156        /*
1157         * We need to do this right in the beginning since
 1158         * the alloc_arraycache() calls below are going to use this list.
1159         * kmalloc_node allows us to add the slab to the right
1160         * kmem_cache_node and not this cpu's kmem_cache_node
1161         */
1162        err = init_cache_node_node(node);
1163        if (err < 0)
1164                goto bad;
1165
1166        /*
1167         * Now we can go ahead with allocating the shared arrays and
1168         * array caches
1169         */
1170        list_for_each_entry(cachep, &slab_caches, list) {
1171                struct array_cache *shared = NULL;
1172                struct alien_cache **alien = NULL;
1173
1174                if (cachep->shared) {
1175                        shared = alloc_arraycache(node,
1176                                cachep->shared * cachep->batchcount,
1177                                0xbaadf00d, GFP_KERNEL);
1178                        if (!shared)
1179                                goto bad;
1180                }
1181                if (use_alien_caches) {
1182                        alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
1183                        if (!alien) {
1184                                kfree(shared);
1185                                goto bad;
1186                        }
1187                }
1188                n = get_node(cachep, node);
1189                BUG_ON(!n);
1190
1191                spin_lock_irq(&n->list_lock);
1192                if (!n->shared) {
1193                        /*
1194                         * We are serialised from CPU_DEAD or
1195                         * CPU_UP_CANCELLED by the cpucontrol lock
1196                         */
1197                        n->shared = shared;
1198                        shared = NULL;
1199                }
1200#ifdef CONFIG_NUMA
1201                if (!n->alien) {
1202                        n->alien = alien;
1203                        alien = NULL;
1204                }
1205#endif
1206                spin_unlock_irq(&n->list_lock);
1207                kfree(shared);
1208                free_alien_cache(alien);
1209        }
1210
1211        return 0;
1212bad:
1213        cpuup_canceled(cpu);
1214        return -ENOMEM;
1215}
1216
1217static int cpuup_callback(struct notifier_block *nfb,
1218                                    unsigned long action, void *hcpu)
1219{
1220        long cpu = (long)hcpu;
1221        int err = 0;
1222
1223        switch (action) {
1224        case CPU_UP_PREPARE:
1225        case CPU_UP_PREPARE_FROZEN:
1226                mutex_lock(&slab_mutex);
1227                err = cpuup_prepare(cpu);
1228                mutex_unlock(&slab_mutex);
1229                break;
1230        case CPU_ONLINE:
1231        case CPU_ONLINE_FROZEN:
1232                start_cpu_timer(cpu);
1233                break;
1234#ifdef CONFIG_HOTPLUG_CPU
1235        case CPU_DOWN_PREPARE:
1236        case CPU_DOWN_PREPARE_FROZEN:
1237                /*
1238                 * Shutdown cache reaper. Note that the slab_mutex is
1239                 * held so that if cache_reap() is invoked it cannot do
1240                 * anything expensive but will only modify reap_work
1241                 * and reschedule the timer.
1242                */
1243                cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1244                /* Now the cache_reaper is guaranteed to be not running. */
1245                per_cpu(slab_reap_work, cpu).work.func = NULL;
1246                break;
1247        case CPU_DOWN_FAILED:
1248        case CPU_DOWN_FAILED_FROZEN:
1249                start_cpu_timer(cpu);
1250                break;
1251        case CPU_DEAD:
1252        case CPU_DEAD_FROZEN:
1253                /*
1254                 * Even if all the cpus of a node are down, we don't free the
 1255                 * kmem_cache_node of any cache. This is to avoid a race between
1256                 * cpu_down, and a kmalloc allocation from another cpu for
1257                 * memory from the node of the cpu going down.  The node
1258                 * structure is usually allocated from kmem_cache_create() and
1259                 * gets destroyed at kmem_cache_destroy().
1260                 */
1261                /* fall through */
1262#endif
1263        case CPU_UP_CANCELED:
1264        case CPU_UP_CANCELED_FROZEN:
1265                mutex_lock(&slab_mutex);
1266                cpuup_canceled(cpu);
1267                mutex_unlock(&slab_mutex);
1268                break;
1269        }
1270        return notifier_from_errno(err);
1271}
1272
1273static struct notifier_block cpucache_notifier = {
1274        &cpuup_callback, NULL, 0
1275};
1276
1277#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1278/*
1279 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1280 * Returns -EBUSY if all objects cannot be drained so that the node is not
1281 * removed.
1282 *
1283 * Must hold slab_mutex.
1284 */
1285static int __meminit drain_cache_node_node(int node)
1286{
1287        struct kmem_cache *cachep;
1288        int ret = 0;
1289
1290        list_for_each_entry(cachep, &slab_caches, list) {
1291                struct kmem_cache_node *n;
1292
1293                n = get_node(cachep, node);
1294                if (!n)
1295                        continue;
1296
1297                drain_freelist(cachep, n, slabs_tofree(cachep, n));
1298
1299                if (!list_empty(&n->slabs_full) ||
1300                    !list_empty(&n->slabs_partial)) {
1301                        ret = -EBUSY;
1302                        break;
1303                }
1304        }
1305        return ret;
1306}
1307
1308static int __meminit slab_memory_callback(struct notifier_block *self,
1309                                        unsigned long action, void *arg)
1310{
1311        struct memory_notify *mnb = arg;
1312        int ret = 0;
1313        int nid;
1314
1315        nid = mnb->status_change_nid;
1316        if (nid < 0)
1317                goto out;
1318
1319        switch (action) {
1320        case MEM_GOING_ONLINE:
1321                mutex_lock(&slab_mutex);
1322                ret = init_cache_node_node(nid);
1323                mutex_unlock(&slab_mutex);
1324                break;
1325        case MEM_GOING_OFFLINE:
1326                mutex_lock(&slab_mutex);
1327                ret = drain_cache_node_node(nid);
1328                mutex_unlock(&slab_mutex);
1329                break;
1330        case MEM_ONLINE:
1331        case MEM_OFFLINE:
1332        case MEM_CANCEL_ONLINE:
1333        case MEM_CANCEL_OFFLINE:
1334                break;
1335        }
1336out:
1337        return notifier_from_errno(ret);
1338}
1339#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1340
1341/*
1342 * swap the static kmem_cache_node with kmalloced memory
1343 */
1344static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1345                                int nodeid)
1346{
1347        struct kmem_cache_node *ptr;
1348
1349        ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1350        BUG_ON(!ptr);
1351
1352        memcpy(ptr, list, sizeof(struct kmem_cache_node));
1353        /*
1354         * Do not assume that spinlocks can be initialized via memcpy:
1355         */
1356        spin_lock_init(&ptr->list_lock);
1357
1358        MAKE_ALL_LISTS(cachep, ptr, nodeid);
1359        cachep->node[nodeid] = ptr;
1360}
1361
1362/*
 1363 * For setting up all the kmem_cache_nodes for a cache whose buffer_size is
 1364 * the same as the size of struct kmem_cache_node.
1365 */
1366static void __init set_up_node(struct kmem_cache *cachep, int index)
1367{
1368        int node;
1369
1370        for_each_online_node(node) {
1371                cachep->node[node] = &init_kmem_cache_node[index + node];
1372                cachep->node[node]->next_reap = jiffies +
1373                    REAPTIMEOUT_NODE +
1374                    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1375        }
1376}
1377
1378/*
 1379 * Initialisation.  Called after the page allocator has been initialised and
1380 * before smp_init().
1381 */
1382void __init kmem_cache_init(void)
1383{
1384        int i;
1385
1386        BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
1387                                        sizeof(struct rcu_head));
1388        kmem_cache = &kmem_cache_boot;
1389
1390        if (num_possible_nodes() == 1)
1391                use_alien_caches = 0;
1392
1393        for (i = 0; i < NUM_INIT_LISTS; i++)
1394                kmem_cache_node_init(&init_kmem_cache_node[i]);
1395
1396        /*
1397         * Fragmentation resistance on low memory - only use bigger
1398         * page orders on machines with more than 32MB of memory if
1399         * not overridden on the command line.
1400         */
1401        if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1402                slab_max_order = SLAB_MAX_ORDER_HI;
1403
1404        /* Bootstrap is tricky, because several objects are allocated
1405         * from caches that do not exist yet:
1406         * 1) initialize the kmem_cache cache: it contains the struct
1407         *    kmem_cache structures of all caches, except kmem_cache itself:
1408         *    kmem_cache is statically allocated.
1409         *    Initially an __init data area is used for the head array and the
1410         *    kmem_cache_node structures, it's replaced with a kmalloc allocated
1411         *    array at the end of the bootstrap.
1412         * 2) Create the first kmalloc cache.
1413         *    The struct kmem_cache for the new cache is allocated normally.
1414         *    An __init data area is used for the head array.
1415         * 3) Create the remaining kmalloc caches, with minimally sized
1416         *    head arrays.
1417         * 4) Replace the __init data head arrays for kmem_cache and the first
1418         *    kmalloc cache with kmalloc allocated arrays.
1419         * 5) Replace the __init data for kmem_cache_node for kmem_cache and
 1420         *    the other caches with kmalloc allocated memory.
1421         * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1422         */
1423
1424        /* 1) create the kmem_cache */
1425
1426        /*
1427         * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1428         */
1429        create_boot_cache(kmem_cache, "kmem_cache",
1430                offsetof(struct kmem_cache, node) +
1431                                  nr_node_ids * sizeof(struct kmem_cache_node *),
1432                                  SLAB_HWCACHE_ALIGN);
1433        list_add(&kmem_cache->list, &slab_caches);
1434        slab_state = PARTIAL;
1435
1436        /*
 1437         * Initialize the caches that provide memory for the kmem_cache_node
1438         * structures first.  Without this, further allocations will bug.
1439         */
1440        kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
1441                                kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
1442        slab_state = PARTIAL_NODE;
1443
1444        slab_early_init = 0;
1445
1446        /* 5) Replace the bootstrap kmem_cache_node */
1447        {
1448                int nid;
1449
1450                for_each_online_node(nid) {
1451                        init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1452
1453                        init_list(kmalloc_caches[INDEX_NODE],
1454                                          &init_kmem_cache_node[SIZE_NODE + nid], nid);
1455                }
1456        }
1457
1458        create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1459}
1460
1461void __init kmem_cache_init_late(void)
1462{
1463        struct kmem_cache *cachep;
1464
1465        slab_state = UP;
1466
1467        /* 6) resize the head arrays to their final sizes */
1468        mutex_lock(&slab_mutex);
1469        list_for_each_entry(cachep, &slab_caches, list)
1470                if (enable_cpucache(cachep, GFP_NOWAIT))
1471                        BUG();
1472        mutex_unlock(&slab_mutex);
1473
1474        /* Done! */
1475        slab_state = FULL;
1476
1477        /*
1478         * Register a cpu startup notifier callback that initializes
1479         * cpu_cache_get for all new cpus
1480         */
1481        register_cpu_notifier(&cpucache_notifier);
1482
1483#ifdef CONFIG_NUMA
1484        /*
1485         * Register a memory hotplug callback that initializes and frees
1486         * node.
1487         */
1488        hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1489#endif
1490
1491        /*
1492         * The reap timers are started later, with a module init call: That part
1493         * of the kernel is not yet operational.
1494         */
1495}
1496
1497static int __init cpucache_init(void)
1498{
1499        int cpu;
1500
1501        /*
1502         * Register the timers that return unneeded pages to the page allocator
1503         */
1504        for_each_online_cpu(cpu)
1505                start_cpu_timer(cpu);
1506
1507        /* Done! */
1508        slab_state = FULL;
1509        return 0;
1510}
1511__initcall(cpucache_init);
1512
1513static noinline void
1514slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1515{
1516#if DEBUG
1517        struct kmem_cache_node *n;
1518        struct page *page;
1519        unsigned long flags;
1520        int node;
1521        static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1522                                      DEFAULT_RATELIMIT_BURST);
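        /*
         * OOM reports are rate-limited, and suppressed entirely for
         * __GFP_NOWARN allocations.
         */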
1523
1524        if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1525                return;
1526
1527        printk(KERN_WARNING
1528                "SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1529                nodeid, gfpflags);
1530        printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
1531                cachep->name, cachep->size, cachep->gfporder);
1532
1533        for_each_kmem_cache_node(cachep, node, n) {
1534                unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
1535                unsigned long active_slabs = 0, num_slabs = 0;
1536
1537                spin_lock_irqsave(&n->list_lock, flags);
1538                list_for_each_entry(page, &n->slabs_full, lru) {
1539                        active_objs += cachep->num;
1540                        active_slabs++;
1541                }
1542                list_for_each_entry(page, &n->slabs_partial, lru) {
1543                        active_objs += page->active;
1544                        active_slabs++;
1545                }
1546                list_for_each_entry(page, &n->slabs_free, lru)
1547                        num_slabs++;
1548
1549                free_objects += n->free_objects;
1550                spin_unlock_irqrestore(&n->list_lock, flags);
1551
1552                num_slabs += active_slabs;
1553                num_objs = num_slabs * cachep->num;
1554                printk(KERN_WARNING
1555                        "  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
1556                        node, active_slabs, num_slabs, active_objs, num_objs,
1557                        free_objects);
1558        }
1559#endif
1560}
1561
1562/*
1563 * Interface to system's page allocator. No need to hold the
1564 * kmem_cache_node ->list_lock.
1565 *
1566 * If we requested dmaable memory, we will get it. Even if we
1567 * did not request dmaable memory, we might get it, but that
1568 * would be relatively rare and ignorable.
1569 */
1570static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1571                                                                int nodeid)
1572{
1573        struct page *page;
1574        int nr_pages;
1575
1576        flags |= cachep->allocflags;
1577        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1578                flags |= __GFP_RECLAIMABLE;
1579
1580        if (memcg_charge_slab(cachep, flags, cachep->gfporder))
1581                return NULL;
1582
1583        page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1584        if (!page) {
1585                memcg_uncharge_slab(cachep, cachep->gfporder);
1586                slab_out_of_memory(cachep, flags, nodeid);
1587                return NULL;
1588        }
1589
1590        /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1591        if (unlikely(page->pfmemalloc))
1592                pfmemalloc_active = true;
1593
1594        nr_pages = (1 << cachep->gfporder);
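        /*
         * Account every page of the slab (2^gfporder of them, e.g. 4 pages
         * for an order-2 slab) as reclaimable or unreclaimable slab memory.
         */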
1595        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1596                add_zone_page_state(page_zone(page),
1597                        NR_SLAB_RECLAIMABLE, nr_pages);
1598        else
1599                add_zone_page_state(page_zone(page),
1600                        NR_SLAB_UNRECLAIMABLE, nr_pages);
1601        __SetPageSlab(page);
1602        if (page->pfmemalloc)
1603                SetPageSlabPfmemalloc(page);
1604
1605        if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1606                kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1607
1608                if (cachep->ctor)
1609                        kmemcheck_mark_uninitialized_pages(page, nr_pages);
1610                else
1611                        kmemcheck_mark_unallocated_pages(page, nr_pages);
1612        }
1613
1614        return page;
1615}
1616
1617/*
1618 * Interface to system's page release.
1619 */
1620static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1621{
1622        const unsigned long nr_freed = (1 << cachep->gfporder);
1623
1624        kmemcheck_free_shadow(page, cachep->gfporder);
1625
1626        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1627                sub_zone_page_state(page_zone(page),
1628                                NR_SLAB_RECLAIMABLE, nr_freed);
1629        else
1630                sub_zone_page_state(page_zone(page),
1631                                NR_SLAB_UNRECLAIMABLE, nr_freed);
1632
1633        BUG_ON(!PageSlab(page));
1634        __ClearPageSlabPfmemalloc(page);
1635        __ClearPageSlab(page);
1636        page_mapcount_reset(page);
1637        page->mapping = NULL;
1638
1639        if (current->reclaim_state)
1640                current->reclaim_state->reclaimed_slab += nr_freed;
1641        __free_pages(page, cachep->gfporder);
1642        memcg_uncharge_slab(cachep, cachep->gfporder);
1643}
1644
1645static void kmem_rcu_free(struct rcu_head *head)
1646{
1647        struct kmem_cache *cachep;
1648        struct page *page;
1649
1650        page = container_of(head, struct page, rcu_head);
1651        cachep = page->slab_cache;
1652
1653        kmem_freepages(cachep, page);
1654}
1655
1656#if DEBUG
1657
1658#ifdef CONFIG_DEBUG_PAGEALLOC
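/*
 * Record debugging state inside a freed object: a start marker
 * (0x12345678), the caller and cpu, as much of the current kernel stack
 * as fits, and an end marker (0x87654321).  Objects smaller than five
 * words are left untouched.
 */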
1659static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1660                            unsigned long caller)
1661{
1662        int size = cachep->object_size;
1663
1664        addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1665
1666        if (size < 5 * sizeof(unsigned long))
1667                return;
1668
1669        *addr++ = 0x12345678;
1670        *addr++ = caller;
1671        *addr++ = smp_processor_id();
1672        size -= 3 * sizeof(unsigned long);
1673        {
1674                unsigned long *sptr = &caller;
1675                unsigned long svalue;
1676
1677                while (!kstack_end(sptr)) {
1678                        svalue = *sptr++;
1679                        if (kernel_text_address(svalue)) {
1680                                *addr++ = svalue;
1681                                size -= sizeof(unsigned long);
1682                                if (size <= sizeof(unsigned long))
1683                                        break;
1684                        }
1685                }
1686
1687        }
1688        *addr++ = 0x87654321;
1689}
1690#endif
1691
1692static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1693{
1694        int size = cachep->object_size;
1695        addr = &((char *)addr)[obj_offset(cachep)];
1696
1697        memset(addr, val, size);
1698        *(unsigned char *)(addr + size - 1) = POISON_END;
1699}
1700
1701static void dump_line(char *data, int offset, int limit)
1702{
1703        int i;
1704        unsigned char error = 0;
1705        int bad_count = 0;
1706
1707        printk(KERN_ERR "%03x: ", offset);
1708        for (i = 0; i < limit; i++) {
1709                if (data[offset + i] != POISON_FREE) {
1710                        error = data[offset + i];
1711                        bad_count++;
1712                }
1713        }
1714        print_hex_dump(KERN_CONT, "", 0, 16, 1,
1715                        &data[offset], limit, 1);
1716
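        /*
         * If exactly one byte differs from POISON_FREE, and XOR-ing it with
         * POISON_FREE leaves a single bit set (a power of two), the damage
         * looks like a single flipped bit rather than a stray write.
         */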
1717        if (bad_count == 1) {
1718                error ^= POISON_FREE;
1719                if (!(error & (error - 1))) {
1720                        printk(KERN_ERR "Single bit error detected. Probably "
1721                                        "bad RAM.\n");
1722#ifdef CONFIG_X86
1723                        printk(KERN_ERR "Run memtest86+ or a similar memory "
1724                                        "test tool.\n");
1725#else
1726                        printk(KERN_ERR "Run a memory test tool.\n");
1727#endif
1728                }
1729        }
1730}
1731#endif
1732
1733#if DEBUG
1734
1735static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1736{
1737        int i, size;
1738        char *realobj;
1739
1740        if (cachep->flags & SLAB_RED_ZONE) {
1741                printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
1742                        *dbg_redzone1(cachep, objp),
1743                        *dbg_redzone2(cachep, objp));
1744        }
1745
1746        if (cachep->flags & SLAB_STORE_USER) {
1747                printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
1748                       *dbg_userword(cachep, objp),
1749                       *dbg_userword(cachep, objp));
1750        }
1751        realobj = (char *)objp + obj_offset(cachep);
1752        size = cachep->object_size;
1753        for (i = 0; i < size && lines; i += 16, lines--) {
1754                int limit;
1755                limit = 16;
1756                if (i + limit > size)
1757                        limit = size - i;
1758                dump_line(realobj, i, limit);
1759        }
1760}
1761
1762static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1763{
1764        char *realobj;
1765        int size, i;
1766        int lines = 0;
1767
1768        realobj = (char *)objp + obj_offset(cachep);
1769        size = cachep->object_size;
1770
1771        for (i = 0; i < size; i++) {
1772                char exp = POISON_FREE;
1773                if (i == size - 1)
1774                        exp = POISON_END;
1775                if (realobj[i] != exp) {
1776                        int limit;
1777                        /* Mismatch ! */
1778                        /* Print header */
1779                        if (lines == 0) {
1780                                printk(KERN_ERR
1781                                        "Slab corruption (%s): %s start=%p, len=%d\n",
1782                                        print_tainted(), cachep->name, realobj, size);
1783                                print_objinfo(cachep, objp, 0);
1784                        }
1785                        /* Hexdump the affected line */
1786                        i = (i / 16) * 16;
1787                        limit = 16;
1788                        if (i + limit > size)
1789                                limit = size - i;
1790                        dump_line(realobj, i, limit);
1791                        i += 16;
1792                        lines++;
1793                        /* Limit to 5 lines */
1794                        if (lines > 5)
1795                                break;
1796                }
1797        }
1798        if (lines != 0) {
1799                /* Print some data about the neighboring objects, if they
1800                 * exist:
1801                 */
1802                struct page *page = virt_to_head_page(objp);
1803                unsigned int objnr;
1804
1805                objnr = obj_to_index(cachep, page, objp);
1806                if (objnr) {
1807                        objp = index_to_obj(cachep, page, objnr - 1);
1808                        realobj = (char *)objp + obj_offset(cachep);
1809                        printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1810                               realobj, size);
1811                        print_objinfo(cachep, objp, 2);
1812                }
1813                if (objnr + 1 < cachep->num) {
1814                        objp = index_to_obj(cachep, page, objnr + 1);
1815                        realobj = (char *)objp + obj_offset(cachep);
1816                        printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1817                               realobj, size);
1818                        print_objinfo(cachep, objp, 2);
1819                }
1820        }
1821}
1822#endif
1823
1824#if DEBUG
1825static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1826                                                struct page *page)
1827{
1828        int i;
1829        for (i = 0; i < cachep->num; i++) {
1830                void *objp = index_to_obj(cachep, page, i);
1831
1832                if (cachep->flags & SLAB_POISON) {
1833#ifdef CONFIG_DEBUG_PAGEALLOC
1834                        if (cachep->size % PAGE_SIZE == 0 &&
1835                                        OFF_SLAB(cachep))
1836                                kernel_map_pages(virt_to_page(objp),
1837                                        cachep->size / PAGE_SIZE, 1);
1838                        else
1839                                check_poison_obj(cachep, objp);
1840#else
1841                        check_poison_obj(cachep, objp);
1842#endif
1843                }
1844                if (cachep->flags & SLAB_RED_ZONE) {
1845                        if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1846                                slab_error(cachep, "start of a freed object "
1847                                           "was overwritten");
1848                        if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1849                                slab_error(cachep, "end of a freed object "
1850                                           "was overwritten");
1851                }
1852        }
1853}
1854#else
1855static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1856                                                struct page *page)
1857{
1858}
1859#endif
1860
1861/**
1862 * slab_destroy - destroy and release all objects in a slab
1863 * @cachep: cache pointer being destroyed
1864 * @page: page pointer being destroyed
1865 *
1866 * Destroy all the objs in a slab page, and release the mem back to the system.
1867 * Before calling, the slab page must have been unlinked from the cache. The
1868 * kmem_cache_node ->list_lock is not held/needed.
1869 */
1870static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1871{
1872        void *freelist;
1873
1874        freelist = page->freelist;
1875        slab_destroy_debugcheck(cachep, page);
1876        if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1877                struct rcu_head *head;
1878
1879                /*
1880                 * RCU free overloads the RCU head over the LRU.
1881                 * slab_page has been overloaded over the LRU as well,
1882                 * but it is not used from now on, so we can
1883                 * use that space safely.
1884                 */
1885                head = (void *)&page->rcu_head;
1886                call_rcu(head, kmem_rcu_free);
1887
1888        } else {
1889                kmem_freepages(cachep, page);
1890        }
1891
1892        /*
1893         * From now on the freelist is not used, even though the actual
1894         * page may only be freed later, in RCU context.
1895         */
1896        if (OFF_SLAB(cachep))
1897                kmem_cache_free(cachep->freelist_cache, freelist);
1898}
1899
1900static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1901{
1902        struct page *page, *n;
1903
1904        list_for_each_entry_safe(page, n, list, lru) {
1905                list_del(&page->lru);
1906                slab_destroy(cachep, page);
1907        }
1908}
1909
1910/**
1911 * calculate_slab_order - calculate size (page order) of slabs
1912 * @cachep: pointer to the cache that is being created
1913 * @size: size of objects to be created in this cache.
1914 * @align: required alignment for the objects.
1915 * @flags: slab allocation flags
1916 *
1917 * Also calculates the number of objects per slab.
1918 *
1919 * This could be made much more intelligent.  For now, try to avoid using
1920 * high order pages for slabs.  When the gfp() functions are more friendly
1921 * towards high-order requests, this should be changed.
1922 */
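/*
 * Rough illustration (assuming 4K pages, ignoring freelist and alignment
 * overhead): for ~100-byte objects an order-0 slab holds about 40 objects
 * with less than 100 bytes left over; since left_over * 8 <= PAGE_SIZE the
 * fragmentation check accepts order 0 and the search stops there.
 */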
1923static size_t calculate_slab_order(struct kmem_cache *cachep,
1924                        size_t size, size_t align, unsigned long flags)
1925{
1926        unsigned long offslab_limit;
1927        size_t left_over = 0;
1928        int gfporder;
1929
1930        for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
1931                unsigned int num;
1932                size_t remainder;
1933
1934                cache_estimate(gfporder, size, align, flags, &remainder, &num);
1935                if (!num)
1936                        continue;
1937
1938                /* Can't handle more objects per slab than SLAB_OBJ_MAX_NUM */
1939                if (num > SLAB_OBJ_MAX_NUM)
1940                        break;
1941
1942                if (flags & CFLGS_OFF_SLAB) {
1943                        size_t freelist_size_per_obj = sizeof(freelist_idx_t);
1944                        /*
1945                         * Max number of objs-per-slab for caches which
1946                         * use off-slab slabs. Needed to avoid a possible
1947                         * looping condition in cache_grow().
1948                         */
1949                        if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
1950                                freelist_size_per_obj += sizeof(char);
1951                        offslab_limit = size;
1952                        offslab_limit /= freelist_size_per_obj;
1953
1954                        if (num > offslab_limit)
1955                                break;
1956                }
1957
1958                /* Found something acceptable - save it away */
1959                cachep->num = num;
1960                cachep->gfporder = gfporder;
1961                left_over = remainder;
1962
1963                /*
1964                 * A VFS-reclaimable slab tends to have most allocations
1965                 * as GFP_NOFS and we really don't want to have to be allocating
1966                 * higher-order pages when we are unable to shrink dcache.
1967                 */
1968                if (flags & SLAB_RECLAIM_ACCOUNT)
1969                        break;
1970
1971                /*
1972                 * Large number of objects is good, but very large slabs are
1973                 * currently bad for the gfp()s.
1974                 */
1975                if (gfporder >= slab_max_order)
1976                        break;
1977
1978                /*
1979                 * Acceptable internal fragmentation?
1980                 */
1981                if (left_over * 8 <= (PAGE_SIZE << gfporder))
1982                        break;
1983        }
1984        return left_over;
1985}
1986
1987static struct array_cache __percpu *alloc_kmem_cache_cpus(
1988                struct kmem_cache *cachep, int entries, int batchcount)
1989{
1990        int cpu;
1991        size_t size;
1992        struct array_cache __percpu *cpu_cache;
1993
1994        size = sizeof(void *) * entries + sizeof(struct array_cache);
1995        cpu_cache = __alloc_percpu(size, sizeof(void *));
1996
1997        if (!cpu_cache)
1998                return NULL;
1999
2000        for_each_possible_cpu(cpu) {
2001                init_arraycache(per_cpu_ptr(cpu_cache, cpu),
2002                                entries, batchcount);
2003        }
2004
2005        return cpu_cache;
2006}
2007
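/*
 * During early boot only a minimal one-entry per-cpu array is set up
 * (entries and batchcount of 1); kmem_cache_init_late() later calls
 * enable_cpucache() to resize it to its final size.
 */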
2008static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2009{
2010        if (slab_state >= FULL)
2011                return enable_cpucache(cachep, gfp);
2012
2013        cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
2014        if (!cachep->cpu_cache)
2015                return 1;
2016
2017        if (slab_state == DOWN) {
2018                /* Creation of first cache (kmem_cache). */
2019                set_up_node(kmem_cache, CACHE_CACHE);
2020        } else if (slab_state == PARTIAL) {
2021                /* For kmem_cache_node */
2022                set_up_node(cachep, SIZE_NODE);
2023        } else {
2024                int node;
2025
2026                for_each_online_node(node) {
2027                        cachep->node[node] = kmalloc_node(
2028                                sizeof(struct kmem_cache_node), gfp, node);
2029                        BUG_ON(!cachep->node[node]);
2030                        kmem_cache_node_init(cachep->node[node]);
2031                }
2032        }
2033
2034        cachep->node[numa_mem_id()]->next_reap =
2035                        jiffies + REAPTIMEOUT_NODE +
2036                        ((unsigned long)cachep) % REAPTIMEOUT_NODE;
2037
2038        cpu_cache_get(cachep)->avail = 0;
2039        cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2040        cpu_cache_get(cachep)->batchcount = 1;
2041        cpu_cache_get(cachep)->touched = 0;
2042        cachep->batchcount = 1;
2043        cachep->limit = BOOT_CPUCACHE_ENTRIES;
2044        return 0;
2045}
2046
2047unsigned long kmem_cache_flags(unsigned long object_size,
2048        unsigned long flags, const char *name,
2049        void (*ctor)(void *))
2050{
2051        return flags;
2052}
2053
2054struct kmem_cache *
2055__kmem_cache_alias(const char *name, size_t size, size_t align,
2056                   unsigned long flags, void (*ctor)(void *))
2057{
2058        struct kmem_cache *cachep;
2059
2060        cachep = find_mergeable(size, align, flags, name, ctor);
2061        if (cachep) {
2062                cachep->refcount++;
2063
2064                /*
2065                 * Adjust the object sizes so that we clear
2066                 * the complete object on kzalloc.
2067                 */
2068                cachep->object_size = max_t(int, cachep->object_size, size);
2069        }
2070        return cachep;
2071}
2072
2073/**
2074 * __kmem_cache_create - Create a cache.
2075 * @cachep: cache management descriptor
2076 * @flags: SLAB flags
2077 *
2078 * Returns zero on success, nonzero on failure.
2079 * Cannot be called within an interrupt, but can be interrupted.
2080 * The @ctor is run when new pages are allocated by the cache.
2081 *
2082 * The flags are
2083 *
2084 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2085 * to catch references to uninitialised memory.
2086 *
2087 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2088 * for buffer overruns.
2089 *
2090 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2091 * cacheline.  This can be beneficial if you're counting cycles as closely
2092 * as davem.
2093 */
2094int
2095__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2096{
2097        size_t left_over, freelist_size;
2098        size_t ralign = BYTES_PER_WORD;
2099        gfp_t gfp;
2100        int err;
2101        size_t size = cachep->size;
2102
2103#if DEBUG
2104#if FORCED_DEBUG
2105        /*
2106         * Enable redzoning and last user accounting, except for caches with
2107         * large objects, if the increased size would increase the object size
2108         * above the next power of two: caches with object sizes just above a
2109         * power of two have a significant amount of internal fragmentation.
2110         */
2111        if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2112                                                2 * sizeof(unsigned long long)))
2113                flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2114        if (!(flags & SLAB_DESTROY_BY_RCU))
2115                flags |= SLAB_POISON;
2116#endif
2117        if (flags & SLAB_DESTROY_BY_RCU)
2118                BUG_ON(flags & SLAB_POISON);
2119#endif
2120
2121        /*
2122         * Check that size is in terms of words.  This is needed to avoid
2123         * unaligned accesses for some archs when redzoning is used, and makes
2124         * sure any on-slab bufctl's are also correctly aligned.
2125         */
2126        if (size & (BYTES_PER_WORD - 1)) {
2127                size += (BYTES_PER_WORD - 1);
2128                size &= ~(BYTES_PER_WORD - 1);
2129        }
2130
2131        if (flags & SLAB_RED_ZONE) {
2132                ralign = REDZONE_ALIGN;
2133                /* If redzoning, ensure that the second redzone is suitably
2134                 * aligned, by adjusting the object size accordingly. */
2135                size += REDZONE_ALIGN - 1;
2136                size &= ~(REDZONE_ALIGN - 1);
2137        }
2138
2139        /* 3) caller mandated alignment */
2140        if (ralign < cachep->align) {
2141                ralign = cachep->align;
2142        }
2143        /* disable debug if necessary */
2144        if (ralign > __alignof__(unsigned long long))
2145                flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2146        /*
2147         * 4) Store it.
2148         */
2149        cachep->align = ralign;
2150
2151        if (slab_is_available())
2152                gfp = GFP_KERNEL;
2153        else
2154                gfp = GFP_NOWAIT;
2155
2156#if DEBUG
2157
2158        /*
2159         * Both debugging options require word-alignment which is calculated
2160         * into align above.
2161         */
2162        if (flags & SLAB_RED_ZONE) {
2163                /* add space for red zone words */
2164                cachep->obj_offset += sizeof(unsigned long long);
2165                size += 2 * sizeof(unsigned long long);
2166        }
2167        if (flags & SLAB_STORE_USER) {
2168                /* user store requires one word storage behind the end of
2169                 * the real object. But if the second red zone needs to be
2170                 * aligned to 64 bits, we must allow that much space.
2171                 */
2172                if (flags & SLAB_RED_ZONE)
2173                        size += REDZONE_ALIGN;
2174                else
2175                        size += BYTES_PER_WORD;
2176        }
2177#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2178        if (size >= kmalloc_size(INDEX_NODE + 1)
2179            && cachep->object_size > cache_line_size()
2180            && ALIGN(size, cachep->align) < PAGE_SIZE) {
2181                cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
2182                size = PAGE_SIZE;
2183        }
2184#endif
2185#endif
2186
2187        /*
2188         * Determine if the slab management is 'on' or 'off' slab.
2189         * (bootstrapping cannot cope with offslab caches so don't do
2190         * it too early on. Always use on-slab management when
2191         * SLAB_NOLEAKTRACE is set, to avoid recursive calls into kmemleak)
2192         */
2193        if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init &&
2194            !(flags & SLAB_NOLEAKTRACE))
2195                /*
2196                 * Size is large, assume best to place the slab management obj
2197                 * off-slab (should allow better packing of objs).
2198                 */
2199                flags |= CFLGS_OFF_SLAB;
2200
2201        size = ALIGN(size, cachep->align);
2202        /*
2203         * We must restrict the number of objects in a slab so that a
2204         * byte-sized index suffices; see the SLAB_OBJ_MIN_SIZE definition.
2205         */
2206        if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2207                size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2208
2209        left_over = calculate_slab_order(cachep, size, cachep->align, flags);
2210
2211        if (!cachep->num)
2212                return -E2BIG;
2213
2214        freelist_size = calculate_freelist_size(cachep->num, cachep->align);
2215
2216        /*
2217         * If the slab has been placed off-slab, and we have enough space then
2218         * move it on-slab. This is at the expense of any extra colouring.
2219         */
2220        if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
2221                flags &= ~CFLGS_OFF_SLAB;
2222                left_over -= freelist_size;
2223        }
2224
2225        if (flags & CFLGS_OFF_SLAB) {
2226                /* really off slab. No need for manual alignment */
2227                freelist_size = calculate_freelist_size(cachep->num, 0);
2228
2229#ifdef CONFIG_PAGE_POISONING
2230                /* If we're going to use the generic kernel_map_pages()
2231                 * poisoning, then it's going to smash the contents of
2232                 * the redzone and userword anyhow, so switch them off.
2233                 */
2234                if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2235                        flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2236#endif
2237        }
2238
2239        cachep->colour_off = cache_line_size();
2240        /* Offset must be a multiple of the alignment. */
2241        if (cachep->colour_off < cachep->align)
2242                cachep->colour_off = cachep->align;
2243        cachep->colour = left_over / cachep->colour_off;
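        /*
         * Slab colouring: successive slabs start their objects at offsets
         * 0, colour_off, 2 * colour_off, ... (see cache_grow()), so colour
         * is the number of distinct offsets the leftover space allows.
         * E.g. 192 leftover bytes with a 64-byte colour_off give 3 colours.
         */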
2244        cachep->freelist_size = freelist_size;
2245        cachep->flags = flags;
2246        cachep->allocflags = __GFP_COMP;
2247        if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2248                cachep->allocflags |= GFP_DMA;
2249        cachep->size = size;
2250        cachep->reciprocal_buffer_size = reciprocal_value(size);
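        /*
         * The precomputed reciprocal lets obj_to_index() avoid a division
         * by using a multiply-and-shift (reciprocal_divide()).
         */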
2251
2252        if (flags & CFLGS_OFF_SLAB) {
2253                cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
2254                /*
2255                 * This is a possibility for one of the kmalloc_{dma,}_caches.
2256                 * But since we go off slab only for object size greater than
2257                 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
2258                 * in ascending order, this should not happen at all.
2259                 * But leave a BUG_ON for some lucky dude.
2260                 */
2261                BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
2262        }
2263
2264        err = setup_cpu_cache(cachep, gfp);
2265        if (err) {
2266                __kmem_cache_shutdown(cachep);
2267                return err;
2268        }
2269
2270        return 0;
2271}
2272
2273#if DEBUG
2274static void check_irq_off(void)
2275{
2276        BUG_ON(!irqs_disabled());
2277}
2278
2279static void check_irq_on(void)
2280{
2281        BUG_ON(irqs_disabled());
2282}
2283
2284static void check_spinlock_acquired(struct kmem_cache *cachep)
2285{
2286#ifdef CONFIG_SMP
2287        check_irq_off();
2288        assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2289#endif
2290}
2291
2292static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2293{
2294#ifdef CONFIG_SMP
2295        check_irq_off();
2296        assert_spin_locked(&get_node(cachep, node)->list_lock);
2297#endif
2298}
2299
2300#else
2301#define check_irq_off() do { } while(0)
2302#define check_irq_on()  do { } while(0)
2303#define check_spinlock_acquired(x) do { } while(0)
2304#define check_spinlock_acquired_node(x, y) do { } while(0)
2305#endif
2306
2307static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
2308                        struct array_cache *ac,
2309                        int force, int node);
2310
2311static void do_drain(void *arg)
2312{
2313        struct kmem_cache *cachep = arg;
2314        struct array_cache *ac;
2315        int node = numa_mem_id();
2316        struct kmem_cache_node *n;
2317        LIST_HEAD(list);
2318
2319        check_irq_off();
2320        ac = cpu_cache_get(cachep);
2321        n = get_node(cachep, node);
2322        spin_lock(&n->list_lock);
2323        free_block(cachep, ac->entry, ac->avail, node, &list);
2324        spin_unlock(&n->list_lock);
2325        slabs_destroy(cachep, &list);
2326        ac->avail = 0;
2327}
2328
2329static void drain_cpu_caches(struct kmem_cache *cachep)
2330{
2331        struct kmem_cache_node *n;
2332        int node;
2333
2334        on_each_cpu(do_drain, cachep, 1);
2335        check_irq_on();
2336        for_each_kmem_cache_node(cachep, node, n)
2337                if (n->alien)
2338                        drain_alien_cache(cachep, n->alien);
2339
2340        for_each_kmem_cache_node(cachep, node, n)
2341                drain_array(cachep, n, n->shared, 1, node);
2342}
2343
2344/*
2345 * Remove slabs from the list of free slabs.
2346 * Specify the number of slabs to drain in tofree.
2347 *
2348 * Returns the actual number of slabs released.
2349 */
2350static int drain_freelist(struct kmem_cache *cache,
2351                        struct kmem_cache_node *n, int tofree)
2352{
2353        struct list_head *p;
2354        int nr_freed;
2355        struct page *page;
2356
2357        nr_freed = 0;
2358        while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2359
2360                spin_lock_irq(&n->list_lock);
2361                p = n->slabs_free.prev;
2362                if (p == &n->slabs_free) {
2363                        spin_unlock_irq(&n->list_lock);
2364                        goto out;
2365                }
2366
2367                page = list_entry(p, struct page, lru);
2368#if DEBUG
2369                BUG_ON(page->active);
2370#endif
2371                list_del(&page->lru);
2372                /*
2373                 * Safe to drop the lock. The slab is no longer linked
2374                 * to the cache.
2375                 */
2376                n->free_objects -= cache->num;
2377                spin_unlock_irq(&n->list_lock);
2378                slab_destroy(cache, page);
2379                nr_freed++;
2380        }
2381out:
2382        return nr_freed;
2383}
2384
2385int __kmem_cache_shrink(struct kmem_cache *cachep)
2386{
2387        int ret = 0;
2388        int node;
2389        struct kmem_cache_node *n;
2390
2391        drain_cpu_caches(cachep);
2392
2393        check_irq_on();
2394        for_each_kmem_cache_node(cachep, node, n) {
2395                drain_freelist(cachep, n, slabs_tofree(cachep, n));
2396
2397                ret += !list_empty(&n->slabs_full) ||
2398                        !list_empty(&n->slabs_partial);
2399        }
2400        return (ret ? 1 : 0);
2401}
2402
2403int __kmem_cache_shutdown(struct kmem_cache *cachep)
2404{
2405        int i;
2406        struct kmem_cache_node *n;
2407        int rc = __kmem_cache_shrink(cachep);
2408
2409        if (rc)
2410                return rc;
2411
2412        free_percpu(cachep->cpu_cache);
2413
2414        /* NUMA: free the node structures */
2415        for_each_kmem_cache_node(cachep, i, n) {
2416                kfree(n->shared);
2417                free_alien_cache(n->alien);
2418                kfree(n);
2419                cachep->node[i] = NULL;
2420        }
2421        return 0;
2422}
2423
2424/*
2425 * Get the memory for a slab management obj.
2426 *
2427 * For a slab cache whose slab descriptor is off-slab, the
2428 * slab descriptor can't come from the same cache that is being created:
2429 * if it did, that would mean creation of the kmalloc_{dma,}_cache of
2430 * size sizeof(slab descriptor) had been deferred to this point, and we
2431 * would eventually call down to __kmem_cache_create(), which
2432 * in turn looks up the kmalloc_{dma,}_caches for the desired-size one.
2433 * This is a "chicken-and-egg" problem.
2434 *
2435 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2436 * which are all initialized during kmem_cache_init().
2437 */
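/*
 * On-slab, the freelist sits at the start of the slab page just after the
 * colour offset and s_mem (the first object) follows it; off-slab, the
 * freelist is a separate kmem_cache_alloc_node() object and s_mem starts
 * right at the colour offset.
 */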
2438static void *alloc_slabmgmt(struct kmem_cache *cachep,
2439                                   struct page *page, int colour_off,
2440                                   gfp_t local_flags, int nodeid)
2441{
2442        void *freelist;
2443        void *addr = page_address(page);
2444
2445        if (OFF_SLAB(cachep)) {
2446                /* Slab management obj is off-slab. */
2447                freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2448                                              local_flags, nodeid);
2449                if (!freelist)
2450                        return NULL;
2451        } else {
2452                freelist = addr + colour_off;
2453                colour_off += cachep->freelist_size;
2454        }
2455        page->active = 0;
2456        page->s_mem = addr + colour_off;
2457        return freelist;
2458}
2459
2460static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2461{
2462        return ((freelist_idx_t *)page->freelist)[idx];
2463}
2464
2465static inline void set_free_obj(struct page *page,
2466                                        unsigned int idx, freelist_idx_t val)
2467{
2468        ((freelist_idx_t *)(page->freelist))[idx] = val;
2469}
2470
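/*
 * The freelist is an array of object indices used as a stack: entries
 * page->active .. cachep->num - 1 name the free objects, slab_get_obj()
 * pops the entry at page->active and slab_put_obj() pushes a freed index
 * back, so page->active doubles as the count of allocated objects.
 */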
2471static void cache_init_objs(struct kmem_cache *cachep,
2472                            struct page *page)
2473{
2474        int i;
2475
2476        for (i = 0; i < cachep->num; i++) {
2477                void *objp = index_to_obj(cachep, page, i);
2478#if DEBUG
2479                /* need to poison the objs? */
2480                if (cachep->flags & SLAB_POISON)
2481                        poison_obj(cachep, objp, POISON_FREE);
2482                if (cachep->flags & SLAB_STORE_USER)
2483                        *dbg_userword(cachep, objp) = NULL;
2484
2485                if (cachep->flags & SLAB_RED_ZONE) {
2486                        *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2487                        *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2488                }
2489                /*
2490                 * Constructors are not allowed to allocate memory from the same
2491                 * cache which they are a constructor for.  Otherwise, deadlock.
2492                 * They must also be threaded.
2493                 */
2494                if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2495                        cachep->ctor(objp + obj_offset(cachep));
2496
2497                if (cachep->flags & SLAB_RED_ZONE) {
2498                        if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2499                                slab_error(cachep, "constructor overwrote the"
2500                                           " end of an object");
2501                        if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2502                                slab_error(cachep, "constructor overwrote the"
2503                                           " start of an object");
2504                }
2505                if ((cachep->size % PAGE_SIZE) == 0 &&
2506                            OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2507                        kernel_map_pages(virt_to_page(objp),
2508                                         cachep->size / PAGE_SIZE, 0);
2509#else
2510                if (cachep->ctor)
2511                        cachep->ctor(objp);
2512#endif
2513                set_obj_status(page, i, OBJECT_FREE);
2514                set_free_obj(page, i, i);
2515        }
2516}
2517
2518static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2519{
2520        if (CONFIG_ZONE_DMA_FLAG) {
2521                if (flags & GFP_DMA)
2522                        BUG_ON(!(cachep->allocflags & GFP_DMA));
2523                else
2524                        BUG_ON(cachep->allocflags & GFP_DMA);
2525        }
2526}
2527
2528static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
2529                                int nodeid)
2530{
2531        void *objp;
2532
2533        objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2534        page->active++;
2535#if DEBUG
2536        WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2537#endif
2538
2539        return objp;
2540}
2541
2542static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
2543                                void *objp, int nodeid)
2544{
2545        unsigned int objnr = obj_to_index(cachep, page, objp);
2546#if DEBUG
2547        unsigned int i;
2548
2549        /* Verify that the slab belongs to the intended node */
2550        WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2551
2552        /* Verify double free bug */
2553        for (i = page->active; i < cachep->num; i++) {
2554                if (get_free_obj(page, i) == objnr) {
2555                        printk(KERN_ERR "slab: double free detected in cache "
2556                                        "'%s', objp %p\n", cachep->name, objp);
2557                        BUG();
2558                }
2559        }
2560#endif
2561        page->active--;
2562        set_free_obj(page, page->active, objnr);
2563}
2564
2565/*
2566 * Map pages beginning at addr to the given cache and slab. This is required
2567 * for the slab allocator to be able to lookup the cache and slab of a
2568 * virtual address for kfree, ksize, and slab debugging.
2569 */
2570static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2571                           void *freelist)
2572{
2573        page->slab_cache = cache;
2574        page->freelist = freelist;
2575}
2576
2577/*
2578 * Grow (by 1) the number of slabs within a cache.  This is called by
2579 * kmem_cache_alloc() when there are no active objs left in a cache.
2580 */
2581static int cache_grow(struct kmem_cache *cachep,
2582                gfp_t flags, int nodeid, struct page *page)
2583{
2584        void *freelist;
2585        size_t offset;
2586        gfp_t local_flags;
2587        struct kmem_cache_node *n;
2588
2589        /*
2590         * Be lazy and only check for valid flags here,  keeping it out of the
2591         * critical path in kmem_cache_alloc().
2592         */
2593        BUG_ON(flags & GFP_SLAB_BUG_MASK);
2594        local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2595
2596        /* Take the node list lock to change the colour_next on this node */
2597        check_irq_off();
2598        n = get_node(cachep, nodeid);
2599        spin_lock(&n->list_lock);
2600
2601        /* Get colour for the slab, and calculate the next value. */
2602        offset = n->colour_next;
2603        n->colour_next++;
2604        if (n->colour_next >= cachep->colour)
2605                n->colour_next = 0;
2606        spin_unlock(&n->list_lock);
2607
2608        offset *= cachep->colour_off;
2609
2610        if (local_flags & __GFP_WAIT)
2611                local_irq_enable();
2612
2613        /*
2614         * The test for missing atomic flag is performed here, rather than
2615         * the more obvious place, simply to reduce the critical path length
2616         * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2617         * will eventually be caught here (where it matters).
2618         */
2619        kmem_flagcheck(cachep, flags);
2620
2621        /*
2622         * Get mem for the objs.  Attempt to allocate a physical page from
2623         * 'nodeid'.
2624         */
2625        if (!page)
2626                page = kmem_getpages(cachep, local_flags, nodeid);
2627        if (!page)
2628                goto failed;
2629
2630        /* Get slab management. */
2631        freelist = alloc_slabmgmt(cachep, page, offset,
2632                        local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2633        if (!freelist)
2634                goto opps1;
2635
2636        slab_map_pages(cachep, page, freelist);
2637
2638        cache_init_objs(cachep, page);
2639
2640        if (local_flags & __GFP_WAIT)
2641                local_irq_disable();
2642        check_irq_off();
2643        spin_lock(&n->list_lock);
2644
2645        /* Make slab active. */
2646        list_add_tail(&page->lru, &(n->slabs_free));
2647        STATS_INC_GROWN(cachep);
2648        n->free_objects += cachep->num;
2649        spin_unlock(&n->list_lock);
2650        return 1;
2651opps1:
2652        kmem_freepages(cachep, page);
2653failed:
2654        if (local_flags & __GFP_WAIT)
2655                local_irq_disable();
2656        return 0;
2657}
2658
2659#if DEBUG
2660
2661/*
2662 * Perform extra freeing checks:
2663 * - detect bad pointers.
2664 * - POISON/RED_ZONE checking
2665 */
2666static void kfree_debugcheck(const void *objp)
2667{
2668        if (!virt_addr_valid(objp)) {
2669                printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2670                       (unsigned long)objp);
2671                BUG();
2672        }
2673}
2674
2675static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2676{
2677        unsigned long long redzone1, redzone2;
2678
2679        redzone1 = *dbg_redzone1(cache, obj);
2680        redzone2 = *dbg_redzone2(cache, obj);
2681
2682        /*
2683         * Redzone is ok.
2684         */
2685        if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2686                return;
2687
2688        if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2689                slab_error(cache, "double free detected");
2690        else
2691                slab_error(cache, "memory outside object was overwritten");
2692
2693        printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2694                        obj, redzone1, redzone2);
2695}
2696
2697static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2698                                   unsigned long caller)
2699{
2700        unsigned int objnr;
2701        struct page *page;
2702
2703        BUG_ON(virt_to_cache(objp) != cachep);
2704
2705        objp -= obj_offset(cachep);
2706        kfree_debugcheck(objp);
2707        page = virt_to_head_page(objp);
2708
2709        if (cachep->flags & SLAB_RED_ZONE) {
2710                verify_redzone_free(cachep, objp);
2711                *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2712                *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2713        }
2714        if (cachep->flags & SLAB_STORE_USER)
2715                *dbg_userword(cachep, objp) = (void *)caller;
2716
2717        objnr = obj_to_index(cachep, page, objp);
2718
2719        BUG_ON(objnr >= cachep->num);
2720        BUG_ON(objp != index_to_obj(cachep, page, objnr));
2721
2722        set_obj_status(page, objnr, OBJECT_FREE);
2723        if (cachep->flags & SLAB_POISON) {
2724#ifdef CONFIG_DEBUG_PAGEALLOC
2725                if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2726                        store_stackinfo(cachep, objp, caller);
2727                        kernel_map_pages(virt_to_page(objp),
2728                                         cachep->size / PAGE_SIZE, 0);
2729                } else {
2730                        poison_obj(cachep, objp, POISON_FREE);
2731                }
2732#else
2733                poison_obj(cachep, objp, POISON_FREE);
2734#endif
2735        }
2736        return objp;
2737}
2738
2739#else
2740#define kfree_debugcheck(x) do { } while(0)
2741#define cache_free_debugcheck(x,objp,z) (objp)
2742#endif
2743
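/*
 * Refill the per-cpu array: take up to batchcount objects, first from the
 * node's shared array, otherwise from partial (then free) slabs, and as a
 * last resort grow the cache with a new slab.
 */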
2744static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
2745                                                        bool force_refill)
2746{
2747        int batchcount;
2748        struct kmem_cache_node *n;
2749        struct array_cache *ac;
2750        int node;
2751
2752        check_irq_off();
2753        node = numa_mem_id();
2754        if (unlikely(force_refill))
2755                goto force_grow;
2756retry:
2757        ac = cpu_cache_get(cachep);
2758        batchcount = ac->batchcount;
2759        if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2760                /*
2761                 * If there was little recent activity on this cache, then
2762                 * perform only a partial refill.  Otherwise we could generate
2763                 * refill bouncing.
2764                 */
2765                batchcount = BATCHREFILL_LIMIT;
2766        }
2767        n = get_node(cachep, node);
2768
2769        BUG_ON(ac->avail > 0 || !n);
2770        spin_lock(&n->list_lock);
2771
2772        /* See if we can refill from the shared array */
2773        if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
2774                n->shared->touched = 1;
2775                goto alloc_done;
2776        }
2777
2778        while (batchcount > 0) {
2779                struct list_head *entry;
2780                struct page *page;
2781                /* Get the slab the allocation is to come from. */
2782                entry = n->slabs_partial.next;
2783                if (entry == &n->slabs_partial) {
2784                        n->free_touched = 1;
2785                        entry = n->slabs_free.next;
2786                        if (entry == &n->slabs_free)
2787                                goto must_grow;
2788                }
2789
2790                page = list_entry(entry, struct page, lru);
2791                check_spinlock_acquired(cachep);
2792
2793                /*
2794                 * The slab was either on partial or free list so
2795                 * there must be at least one object available for
2796                 * allocation.
2797                 */
2798                BUG_ON(page->active >= cachep->num);
2799
2800                while (page->active < cachep->num && batchcount--) {
2801                        STATS_INC_ALLOCED(cachep);
2802                        STATS_INC_ACTIVE(cachep);
2803                        STATS_SET_HIGH(cachep);
2804
2805                        ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
2806                                                                        node));
2807                }
2808
2809                /* move slab to the correct slab list: */
2810                list_del(&page->lru);
2811                if (page->active == cachep->num)
2812                        list_add(&page->lru, &n->slabs_full);
2813                else
2814                        list_add(&page->lru, &n->slabs_partial);
2815        }
2816
2817must_grow:
2818        n->free_objects -= ac->avail;
2819alloc_done:
2820        spin_unlock(&n->list_lock);
2821
2822        if (unlikely(!ac->avail)) {
2823                int x;
2824force_grow:
2825                x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
2826
2827                /* cache_grow can reenable interrupts, then ac could change. */
2828                ac = cpu_cache_get(cachep);
2829                node = numa_mem_id();
2830
2831                /* no objects in sight? abort */
2832                if (!x && (ac->avail == 0 || force_refill))
2833                        return NULL;
2834
2835                if (!ac->avail)         /* objects refilled by interrupt? */
2836                        goto retry;
2837        }
2838        ac->touched = 1;
2839
2840        return ac_get_obj(cachep, ac, flags, force_refill);
2841}
2842
2843static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2844                                                gfp_t flags)
2845{
2846        might_sleep_if(flags & __GFP_WAIT);
2847#if DEBUG
2848        kmem_flagcheck(cachep, flags);
2849#endif
2850}
2851
2852#if DEBUG
2853static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2854                                gfp_t flags, void *objp, unsigned long caller)
2855{
2856        struct page *page;
2857
2858        if (!objp)
2859                return objp;
2860        if (cachep->flags & SLAB_POISON) {
2861#ifdef CONFIG_DEBUG_PAGEALLOC
2862                if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
2863                        kernel_map_pages(virt_to_page(objp),
2864                                         cachep->size / PAGE_SIZE, 1);
2865                else
2866                        check_poison_obj(cachep, objp);
2867#else
2868                check_poison_obj(cachep, objp);
2869#endif
2870                poison_obj(cachep, objp, POISON_INUSE);
2871        }
2872        if (cachep->flags & SLAB_STORE_USER)
2873                *dbg_userword(cachep, objp) = (void *)caller;
2874
2875        if (cachep->flags & SLAB_RED_ZONE) {
2876                if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
2877                                *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
2878                        slab_error(cachep, "double free, or memory outside"
2879                                                " object was overwritten");
2880                        printk(KERN_ERR
2881                                "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
2882                                objp, *dbg_redzone1(cachep, objp),
2883                                *dbg_redzone2(cachep, objp));
2884                }
2885                *dbg_redzone1(cachep, objp) = RED_ACTIVE;
2886                *dbg_redzone2(cachep, objp) = RED_ACTIVE;
2887        }
2888
2889        page = virt_to_head_page(objp);
2890        set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
2891        objp += obj_offset(cachep);
2892        if (cachep->ctor && cachep->flags & SLAB_POISON)
2893                cachep->ctor(objp);
2894        if (ARCH_SLAB_MINALIGN &&
2895            ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
2896                printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
2897                       objp, (int)ARCH_SLAB_MINALIGN);
2898        }
2899        return objp;
2900}
2901#else
2902#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
2903#endif
2904
2905static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
2906{
2907        if (unlikely(cachep == kmem_cache))
2908                return false;
2909
2910        return should_failslab(cachep->object_size, flags, cachep->flags);
2911}
2912
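/*
 * Fast path for allocation: hand out an object from the per-cpu array when
 * one is available (ALLOCHIT), otherwise refill the array from the node
 * lists via cache_alloc_refill() (ALLOCMISS).
 */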
2913static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
2914{
2915        void *objp;
2916        struct array_cache *ac;
2917        bool force_refill = false;
2918
2919        check_irq_off();
2920
2921        ac = cpu_cache_get(cachep);
2922        if (likely(ac->avail)) {
2923                ac->touched = 1;
2924                objp = ac_get_obj(cachep, ac, flags, false);
2925
2926                /*
2927                 * Allow for the possibility that all available objects are
2928                 * disallowed by the current flags
2929                 */
2930                if (objp) {
2931                        STATS_INC_ALLOCHIT(cachep);
2932                        goto out;
2933                }
2934                force_refill = true;
2935        }
2936
2937        STATS_INC_ALLOCMISS(cachep);
2938        objp = cache_alloc_refill(cachep, flags, force_refill);
2939        /*
2940         * the 'ac' may be updated by cache_alloc_refill(),
2941         * and kmemleak_erase() requires its correct value.
2942         */
2943        ac = cpu_cache_get(cachep);
2944
2945out:
2946        /*
2947         * To avoid a false negative, if an object that is in one of the
2948         * per-CPU caches is leaked, we need to make sure kmemleak doesn't
2949         * treat the array pointers as a reference to the object.
2950         */
2951        if (objp)
2952                kmemleak_erase(&ac->entry[ac->avail]);
2953        return objp;
2954}
2955
2956#ifdef CONFIG_NUMA
2957/*
2958 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
2959 *
2960 * If we are in_interrupt, then process context, including cpusets and
2961 * mempolicy, may not apply and should not be used for allocation policy.
2962 */
2963static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
2964{
2965        int nid_alloc, nid_here;
2966
2967        if (in_interrupt() || (flags & __GFP_THISNODE))
2968                return NULL;
2969        nid_alloc = nid_here = numa_mem_id();
2970        if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
2971                nid_alloc = cpuset_slab_spread_node();
2972        else if (current->mempolicy)
2973                nid_alloc = mempolicy_slab_node();
2974        if (nid_alloc != nid_here)
2975                return ____cache_alloc_node(cachep, flags, nid_alloc);
2976        return NULL;
2977}
2978
2979/*
2980 * Fallback function if there was no memory available and no objects on a
2981 * certain node and fall back is permitted. First we scan all the
2982 * available nodes for available objects. If that fails then we
2983 * perform an allocation without specifying a node. This allows the page
2984 * allocator to do its reclaim / fallback magic. We then insert the
2985 * slab into the proper nodelist and then allocate from it.
2986 */
2987static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
2988{
2989        struct zonelist *zonelist;
2990        gfp_t local_flags;
2991        struct zoneref *z;
2992        struct zone *zone;
2993        enum zone_type high_zoneidx = gfp_zone(flags);
2994        void *obj = NULL;
2995        int nid;
2996        unsigned int cpuset_mems_cookie;
2997
2998        if (flags & __GFP_THISNODE)
2999                return NULL;
3000
3001        local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3002
3003retry_cpuset:
3004        cpuset_mems_cookie = read_mems_allowed_begin();
3005        zonelist = node_zonelist(mempolicy_slab_node(), flags);
3006
3007retry:
3008        /*
3009         * Look through allowed nodes for objects available
3010         * from existing per node queues.
3011         */
3012        for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3013                nid = zone_to_nid(zone);
3014
3015                if (cpuset_zone_allowed_hardwall(zone, flags) &&
3016                        get_node(cache, nid) &&
3017                        get_node(cache, nid)->free_objects) {
3018                                obj = ____cache_alloc_node(cache,
3019                                        flags | GFP_THISNODE, nid);
3020                                if (obj)
3021                                        break;
3022                }
3023        }
3024
3025        if (!obj) {
3026                /*
3027                 * This allocation will be performed within the constraints
3028                 * of the current cpuset / memory policy requirements.
3029                 * We may trigger various forms of reclaim on the allowed
3030                 * set and go into memory reserves if necessary.
3031                 */
3032                struct page *page;
3033
3034                if (local_flags & __GFP_WAIT)
3035                        local_irq_enable();
3036                kmem_flagcheck(cache, flags);
3037                page = kmem_getpages(cache, local_flags, numa_mem_id());
3038                if (local_flags & __GFP_WAIT)
3039                        local_irq_disable();
3040                if (page) {
3041                        /*
3042                         * Insert into the appropriate per node queues
3043                         */
3044                        nid = page_to_nid(page);
3045                        if (cache_grow(cache, flags, nid, page)) {
3046                                obj = ____cache_alloc_node(cache,
3047                                        flags | GFP_THISNODE, nid);
3048                                if (!obj)
3049                                        /*
3050                                         * Another processor may allocate the
3051                                         * objects in the slab since we are
3052                                         * not holding any locks.
3053                                         */
3054                                        goto retry;
3055                        } else {
3056                                /* cache_grow already freed obj */
3057                                obj = NULL;
3058                        }
3059                }
3060        }
3061
3062        if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3063                goto retry_cpuset;
3064        return obj;
3065}
3066
3067/*
3068 * An interface to enable slab creation on a given nodeid
3069 */
3070static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3071                                int nodeid)
3072{
3073        struct list_head *entry;
3074        struct page *page;
3075        struct kmem_cache_node *n;
3076        void *obj;
3077        int x;
3078
3079        VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3080        n = get_node(cachep, nodeid);
3081        BUG_ON(!n);
3082
3083retry:
3084        check_irq_off();
3085        spin_lock(&n->list_lock);
3086        entry = n->slabs_partial.next;
3087        if (entry == &n->slabs_partial) {
3088                n->free_touched = 1;
3089                entry = n->slabs_free.next;
3090                if (entry == &n->slabs_free)
3091                        goto must_grow;
3092        }
3093
3094        page = list_entry(entry, struct page, lru);
3095        check_spinlock_acquired_node(cachep, nodeid);
3096
3097        STATS_INC_NODEALLOCS(cachep);
3098        STATS_INC_ACTIVE(cachep);
3099        STATS_SET_HIGH(cachep);
3100
3101        BUG_ON(page->active == cachep->num);
3102
3103        obj = slab_get_obj(cachep, page, nodeid);
3104        n->free_objects--;
3105        /* move the page to the correct slab list: */
3106        list_del(&page->lru);
3107
3108        if (page->active == cachep->num)
3109                list_add(&page->lru, &n->slabs_full);
3110        else
3111                list_add(&page->lru, &n->slabs_partial);
3112
3113        spin_unlock(&n->list_lock);
3114        goto done;
3115
3116must_grow:
3117        spin_unlock(&n->list_lock);
3118        x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3119        if (x)
3120                goto retry;
3121
3122        return fallback_alloc(cachep, flags);
3123
3124done:
3125        return obj;
3126}
3127
3128static __always_inline void *
3129slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3130                   unsigned long caller)
3131{
3132        unsigned long save_flags;
3133        void *ptr;
3134        int slab_node = numa_mem_id();
3135
3136        flags &= gfp_allowed_mask;
3137
3138        lockdep_trace_alloc(flags);
3139
3140        if (slab_should_failslab(cachep, flags))
3141                return NULL;
3142
3143        cachep = memcg_kmem_get_cache(cachep, flags);
3144
3145        cache_alloc_debugcheck_before(cachep, flags);
3146        local_irq_save(save_flags);
3147
3148        if (nodeid == NUMA_NO_NODE)
3149                nodeid = slab_node;
3150
3151        if (unlikely(!get_node(cachep, nodeid))) {
3152                /* Node not bootstrapped yet */
3153                ptr = fallback_alloc(cachep, flags);
3154                goto out;
3155        }
3156
3157        if (nodeid == slab_node) {
3158                /*
3159                 * Use the locally cached objects if possible.
3160                 * However ____cache_alloc does not allow fallback
3161                 * to other nodes. It may fail while we still have
3162                 * objects on other nodes available.
3163                 */
3164                ptr = ____cache_alloc(cachep, flags);
3165                if (ptr)
3166                        goto out;
3167        }
3168        /* ___cache_alloc_node can fall back to other nodes */
3169        ptr = ____cache_alloc_node(cachep, flags, nodeid);
3170  out:
3171        local_irq_restore(save_flags);
3172        ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3173        kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
3174                                 flags);
3175
3176        if (likely(ptr)) {
3177                kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
3178                if (unlikely(flags & __GFP_ZERO))
3179                        memset(ptr, 0, cachep->object_size);
3180        }
3181
3182        return ptr;
3183}
3184
3185static __always_inline void *
3186__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3187{
3188        void *objp;
3189
3190        if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3191                objp = alternate_node_alloc(cache, flags);
3192                if (objp)
3193                        goto out;
3194        }
3195        objp = ____cache_alloc(cache, flags);
3196
3197        /*
3198         * We may just have run out of memory on the local node.
3199         * ____cache_alloc_node() knows how to locate memory on other nodes
3200         */
3201        if (!objp)
3202                objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3203
3204  out:
3205        return objp;
3206}
3207#else
3208
3209static __always_inline void *
3210__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3211{
3212        return ____cache_alloc(cachep, flags);
3213}
3214
3215#endif /* CONFIG_NUMA */
3216
3217static __always_inline void *
3218slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3219{
3220        unsigned long save_flags;
3221        void *objp;
3222
3223        flags &= gfp_allowed_mask;
3224
3225        lockdep_trace_alloc(flags);
3226
3227        if (slab_should_failslab(cachep, flags))
3228                return NULL;
3229
3230        cachep = memcg_kmem_get_cache(cachep, flags);
3231
3232        cache_alloc_debugcheck_before(cachep, flags);
3233        local_irq_save(save_flags);
3234        objp = __do_cache_alloc(cachep, flags);
3235        local_irq_restore(save_flags);
3236        objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3237        kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
3238                                 flags);
3239        prefetchw(objp);
3240
3241        if (likely(objp)) {
3242                kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
3243                if (unlikely(flags & __GFP_ZERO))
3244                        memset(objp, 0, cachep->object_size);
3245        }
3246
3247        return objp;
3248}
3249
3250/*
3251 * Caller must hold the correct kmem_cache_node's list_lock.
3252 * @list: list of detached free slabs that should be freed by the caller
3253 */
3254static void free_block(struct kmem_cache *cachep, void **objpp,
3255                        int nr_objects, int node, struct list_head *list)
3256{
3257        int i;
3258        struct kmem_cache_node *n = get_node(cachep, node);
3259
3260        for (i = 0; i < nr_objects; i++) {
3261                void *objp;
3262                struct page *page;
3263
3264                clear_obj_pfmemalloc(&objpp[i]);
3265                objp = objpp[i];
3266
3267                page = virt_to_head_page(objp);
3268                list_del(&page->lru);
3269                check_spinlock_acquired_node(cachep, node);
3270                slab_put_obj(cachep, page, objp, node);
3271                STATS_DEC_ACTIVE(cachep);
3272                n->free_objects++;
3273
3274                /* fixup slab chains */
3275                if (page->active == 0) {
3276                        if (n->free_objects > n->free_limit) {
3277                                n->free_objects -= cachep->num;
3278                                list_add_tail(&page->lru, list);
3279                        } else {
3280                                list_add(&page->lru, &n->slabs_free);
3281                        }
3282                } else {
3283                        /* Unconditionally move a slab to the end of the
3284                         * partial list on free - maximum time for the
3285                         * other objects to be freed, too.
3286                         */
3287                        list_add_tail(&page->lru, &n->slabs_partial);
3288                }
3289        }
3290}
3291
3292static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3293{
3294        int batchcount;
3295        struct kmem_cache_node *n;
3296        int node = numa_mem_id();
3297        LIST_HEAD(list);
3298
3299        batchcount = ac->batchcount;
3300#if DEBUG
3301        BUG_ON(!batchcount || batchcount > ac->avail);
3302#endif
3303        check_irq_off();
3304        n = get_node(cachep, node);
3305        spin_lock(&n->list_lock);
3306        if (n->shared) {
3307                struct array_cache *shared_array = n->shared;
3308                int max = shared_array->limit - shared_array->avail;
3309                if (max) {
3310                        if (batchcount > max)
3311                                batchcount = max;
3312                        memcpy(&(shared_array->entry[shared_array->avail]),
3313                               ac->entry, sizeof(void *) * batchcount);
3314                        shared_array->avail += batchcount;
3315                        goto free_done;
3316                }
3317        }
3318
3319        free_block(cachep, ac->entry, batchcount, node, &list);
3320free_done:
3321#if STATS
3322        {
3323                int i = 0;
3324                struct list_head *p;
3325
3326                p = n->slabs_free.next;
3327                while (p != &(n->slabs_free)) {
3328                        struct page *page;
3329
3330                        page = list_entry(p, struct page, lru);
3331                        BUG_ON(page->active);
3332
3333                        i++;
3334                        p = p->next;
3335                }
3336                STATS_SET_FREEABLE(cachep, i);
3337        }
3338#endif
3339        spin_unlock(&n->list_lock);
3340        slabs_destroy(cachep, &list);
3341        ac->avail -= batchcount;
3342        memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3343}
3344
3345/*
3346 * Release an obj back to its cache. If the obj has a constructed state, it must
3347 * be in this state _before_ it is released. Called with local interrupts disabled.
3348 */
3349static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3350                                unsigned long caller)
3351{
3352        struct array_cache *ac = cpu_cache_get(cachep);
3353
3354        check_irq_off();
3355        kmemleak_free_recursive(objp, cachep->flags);
3356        objp = cache_free_debugcheck(cachep, objp, caller);
3357
3358        kmemcheck_slab_free(cachep, objp, cachep->object_size);
3359
3360        /*
3361         * Skip calling cache_free_alien() when the platform is not NUMA.
3362         * This avoids the cache misses taken while accessing the slab's page
3363         * (a per-page memory reference) just to get the nodeid. Instead, use a
3364         * global variable to decide whether to skip the call; it is most likely
3365         * to already be present in the cache.
3366         */
3367        if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3368                return;
3369
3370        if (ac->avail < ac->limit) {
3371                STATS_INC_FREEHIT(cachep);
3372        } else {
3373                STATS_INC_FREEMISS(cachep);
3374                cache_flusharray(cachep, ac);
3375        }
3376
3377        ac_put_obj(cachep, ac, objp);
3378}
3379
3380/**
3381 * kmem_cache_alloc - Allocate an object
3382 * @cachep: The cache to allocate from.
3383 * @flags: See kmalloc().
3384 *
3385 * Allocate an object from this cache.  The flags are only relevant
3386 * if the cache has no available objects.
3387 */
3388void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3389{
3390        void *ret = slab_alloc(cachep, flags, _RET_IP_);
3391
3392        trace_kmem_cache_alloc(_RET_IP_, ret,
3393                               cachep->object_size, cachep->size, flags);
3394
3395        return ret;
3396}
3397EXPORT_SYMBOL(kmem_cache_alloc);
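
/*
 * Illustrative usage (editor's sketch, not part of the original file): a
 * typical caller creates a dedicated cache once and then allocates and
 * frees objects from it.  The "foo" names below are hypothetical.
 *
 *	struct foo { int a; struct list_head list; };
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 */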
3398
3399#ifdef CONFIG_TRACING
3400void *
3401kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3402{
3403        void *ret;
3404
3405        ret = slab_alloc(cachep, flags, _RET_IP_);
3406
3407        trace_kmalloc(_RET_IP_, ret,
3408                      size, cachep->size, flags);
3409        return ret;
3410}
3411EXPORT_SYMBOL(kmem_cache_alloc_trace);
3412#endif
3413
3414#ifdef CONFIG_NUMA
3415/**
3416 * kmem_cache_alloc_node - Allocate an object on the specified node
3417 * @cachep: The cache to allocate from.
3418 * @flags: See kmalloc().
3419 * @nodeid: node number of the target node.
3420 *
3421 * Identical to kmem_cache_alloc but it will allocate memory on the given
3422 * node, which can improve the performance for cpu bound structures.
3423 *
3424 * Fallback to other nodes is possible if __GFP_THISNODE is not set.
3425 */
3426void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3427{
3428        void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3429
3430        trace_kmem_cache_alloc_node(_RET_IP_, ret,
3431                                    cachep->object_size, cachep->size,
3432                                    flags, nodeid);
3433
3434        return ret;
3435}
3436EXPORT_SYMBOL(kmem_cache_alloc_node);
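
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * place an object on the node backing a particular cpu.  "foo_cachep" and
 * "target_cpu" are hypothetical; without __GFP_THISNODE the allocation may
 * still fall back to other nodes.
 *
 *	int nid = cpu_to_node(target_cpu);
 *	struct foo *f = kmem_cache_alloc_node(foo_cachep, GFP_KERNEL, nid);
 *	if (!f)
 *		return -ENOMEM;
 */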
3437
3438#ifdef CONFIG_TRACING
3439void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3440                                  gfp_t flags,
3441                                  int nodeid,
3442                                  size_t size)
3443{
3444        void *ret;
3445
3446        ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3447
3448        trace_kmalloc_node(_RET_IP_, ret,
3449                           size, cachep->size,
3450                           flags, nodeid);
3451        return ret;
3452}
3453EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3454#endif
3455
3456static __always_inline void *
3457__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3458{
3459        struct kmem_cache *cachep;
3460
3461        cachep = kmalloc_slab(size, flags);
3462        if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3463                return cachep;
3464        return kmem_cache_alloc_node_trace(cachep, flags, node, size);
3465}
3466
3467void *__kmalloc_node(size_t size, gfp_t flags, int node)
3468{
3469        return __do_kmalloc_node(size, flags, node, _RET_IP_);
3470}
3471EXPORT_SYMBOL(__kmalloc_node);
3472
3473void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3474                int node, unsigned long caller)
3475{
3476        return __do_kmalloc_node(size, flags, node, caller);
3477}
3478EXPORT_SYMBOL(__kmalloc_node_track_caller);
3479#endif /* CONFIG_NUMA */
3480
3481/**
3482 * __do_kmalloc - allocate memory
3483 * @size: how many bytes of memory are required.
3484 * @flags: the type of memory to allocate (see kmalloc).
3485 * @caller: function caller for debug tracking of the caller
3486 */
3487static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3488                                          unsigned long caller)
3489{
3490        struct kmem_cache *cachep;
3491        void *ret;
3492
3493        cachep = kmalloc_slab(size, flags);
3494        if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3495                return cachep;
3496        ret = slab_alloc(cachep, flags, caller);
3497
3498        trace_kmalloc(caller, ret,
3499                      size, cachep->size, flags);
3500
3501        return ret;
3502}
3503
3504void *__kmalloc(size_t size, gfp_t flags)
3505{
3506        return __do_kmalloc(size, flags, _RET_IP_);
3507}
3508EXPORT_SYMBOL(__kmalloc);
3509
3510void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3511{
3512        return __do_kmalloc(size, flags, caller);
3513}
3514EXPORT_SYMBOL(__kmalloc_track_caller);
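
/*
 * Illustrative usage (editor's sketch, not part of the original file): the
 * public kmalloc()/kfree() API is backed by __kmalloc() and kfree() in this
 * file.  Requests are rounded up to the next kmalloc cache size; the buffer
 * size below is arbitrary.
 *
 *	char *buf = kmalloc(128, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */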
3515
3516/**
3517 * kmem_cache_free - Deallocate an object
3518 * @cachep: The cache the allocation was from.
3519 * @objp: The previously allocated object.
3520 *
3521 * Free an object which was previously allocated from this
3522 * cache.
3523 */
3524void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3525{
3526        unsigned long flags;
3527        cachep = cache_from_obj(cachep, objp);
3528        if (!cachep)
3529                return;
3530
3531        local_irq_save(flags);
3532        debug_check_no_locks_freed(objp, cachep->object_size);
3533        if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3534                debug_check_no_obj_freed(objp, cachep->object_size);
3535        __cache_free(cachep, objp, _RET_IP_);
3536        local_irq_restore(flags);
3537
3538        trace_kmem_cache_free(_RET_IP_, objp);
3539}
3540EXPORT_SYMBOL(kmem_cache_free);
3541
3542/**
3543 * kfree - free previously allocated memory
3544 * @objp: pointer returned by kmalloc.
3545 *
3546 * If @objp is NULL, no operation is performed.
3547 *
3548 * Don't free memory not originally allocated by kmalloc()
3549 * or you will run into trouble.
3550 */
3551void kfree(const void *objp)
3552{
3553        struct kmem_cache *c;
3554        unsigned long flags;
3555
3556        trace_kfree(_RET_IP_, objp);
3557
3558        if (unlikely(ZERO_OR_NULL_PTR(objp)))
3559                return;
3560        local_irq_save(flags);
3561        kfree_debugcheck(objp);
3562        c = virt_to_cache(objp);
3563        debug_check_no_locks_freed(objp, c->object_size);
3564
3565        debug_check_no_obj_freed(objp, c->object_size);
3566        __cache_free(c, (void *)objp, _RET_IP_);
3567        local_irq_restore(flags);
3568}
3569EXPORT_SYMBOL(kfree);
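
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * since kfree() ignores NULL and ZERO_SIZE_PTR, error paths may free
 * unconditionally.  The "ctx" structure and its fields are hypothetical.
 *
 *	kfree(ctx->name);
 *	kfree(ctx->buf);	// safe even if never allocated (still NULL)
 *	kfree(ctx);
 */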
3570
3571/*
3572 * This initializes kmem_cache_node or resizes various caches for all nodes.
3573 */
3574static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
3575{
3576        int node;
3577        struct kmem_cache_node *n;
3578        struct array_cache *new_shared;
3579        struct alien_cache **new_alien = NULL;
3580
3581        for_each_online_node(node) {
3582
3583                if (use_alien_caches) {
3584                        new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3585                        if (!new_alien)
3586                                goto fail;
3587                }
3588
3589                new_shared = NULL;
3590                if (cachep->shared) {
3591                        new_shared = alloc_arraycache(node,
3592                                cachep->shared*cachep->batchcount,
3593                                        0xbaadf00d, gfp);
3594                        if (!new_shared) {
3595                                free_alien_cache(new_alien);
3596                                goto fail;
3597                        }
3598                }
3599
3600                n = get_node(cachep, node);
3601                if (n) {
3602                        struct array_cache *shared = n->shared;
3603                        LIST_HEAD(list);
3604
3605                        spin_lock_irq(&n->list_lock);
3606
3607                        if (shared)
3608                                free_block(cachep, shared->entry,
3609                                                shared->avail, node, &list);
3610
3611                        n->shared = new_shared;
3612                        if (!n->alien) {
3613                                n->alien = new_alien;
3614                                new_alien = NULL;
3615                        }
3616                        n->free_limit = (1 + nr_cpus_node(node)) *
3617                                        cachep->batchcount + cachep->num;
3618                        spin_unlock_irq(&n->list_lock);
3619                        slabs_destroy(cachep, &list);
3620                        kfree(shared);
3621                        free_alien_cache(new_alien);
3622                        continue;
3623                }
3624                n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
3625                if (!n) {
3626                        free_alien_cache(new_alien);
3627                        kfree(new_shared);
3628                        goto fail;
3629                }
3630
3631                kmem_cache_node_init(n);
3632                n->next_reap = jiffies + REAPTIMEOUT_NODE +
3633                                ((unsigned long)cachep) % REAPTIMEOUT_NODE;
3634                n->shared = new_shared;
3635                n->alien = new_alien;
3636                n->free_limit = (1 + nr_cpus_node(node)) *
3637                                        cachep->batchcount + cachep->num;
3638                cachep->node[node] = n;
3639        }
3640        return 0;
3641
3642fail:
3643        if (!cachep->list.next) {
3644                /* Cache is not active yet. Roll back what we did */
3645                node--;
3646                while (node >= 0) {
3647                        n = get_node(cachep, node);
3648                        if (n) {
3649                                kfree(n->shared);
3650                                free_alien_cache(n->alien);
3651                                kfree(n);
3652                                cachep->node[node] = NULL;
3653                        }
3654                        node--;
3655                }
3656        }
3657        return -ENOMEM;
3658}
3659
3660/* Always called with the slab_mutex held */
3661static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
3662                                int batchcount, int shared, gfp_t gfp)
3663{
3664        struct array_cache __percpu *cpu_cache, *prev;
3665        int cpu;
3666
3667        cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3668        if (!cpu_cache)
3669                return -ENOMEM;
3670
3671        prev = cachep->cpu_cache;
3672        cachep->cpu_cache = cpu_cache;
3673        kick_all_cpus_sync();
3674
3675        check_irq_on();
3676        cachep->batchcount = batchcount;
3677        cachep->limit = limit;
3678        cachep->shared = shared;
3679
3680        if (!prev)
3681                goto alloc_node;
3682
3683        for_each_online_cpu(cpu) {
3684                LIST_HEAD(list);
3685                int node;
3686                struct kmem_cache_node *n;
3687                struct array_cache *ac = per_cpu_ptr(prev, cpu);
3688
3689                node = cpu_to_mem(cpu);
3690                n = get_node(cachep, node);
3691                spin_lock_irq(&n->list_lock);
3692                free_block(cachep, ac->entry, ac->avail, node, &list);
3693                spin_unlock_irq(&n->list_lock);
3694                slabs_destroy(cachep, &list);
3695        }
3696        free_percpu(prev);
3697
3698alloc_node:
3699        return alloc_kmem_cache_node(cachep, gfp);
3700}
3701
3702static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3703                                int batchcount, int shared, gfp_t gfp)
3704{
3705        int ret;
3706        struct kmem_cache *c = NULL;
3707        int i = 0;
3708
3709        ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3710
3711        if (slab_state < FULL)
3712                return ret;
3713
3714        if ((ret < 0) || !is_root_cache(cachep))
3715                return ret;
3716
3717        VM_BUG_ON(!mutex_is_locked(&slab_mutex));
3718        for_each_memcg_cache_index(i) {
3719                c = cache_from_memcg_idx(cachep, i);
3720                if (c)
3721                        /* return value determined by the parent cache only */
3722                        __do_tune_cpucache(c, limit, batchcount, shared, gfp);
3723        }
3724
3725        return ret;
3726}
3727
3728/* Called with slab_mutex held always */
3729static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3730{
3731        int err;
3732        int limit = 0;
3733        int shared = 0;
3734        int batchcount = 0;
3735
3736        if (!is_root_cache(cachep)) {
3737                struct kmem_cache *root = memcg_root_cache(cachep);
3738                limit = root->limit;
3739                shared = root->shared;
3740                batchcount = root->batchcount;
3741        }
3742
3743        if (limit && shared && batchcount)
3744                goto skip_setup;
3745        /*
3746         * The head array serves three purposes:
3747         * - create a LIFO ordering, i.e. return objects that are cache-warm
3748         * - reduce the number of spinlock operations.
3749         * - reduce the number of linked list operations on the slab and
3750         *   bufctl chains: array operations are cheaper.
3751         * The numbers below are guesses; we should auto-tune as described by
3752         * Bonwick.
3753         */
3754        if (cachep->size > 131072)
3755                limit = 1;
3756        else if (cachep->size > PAGE_SIZE)
3757                limit = 8;
3758        else if (cachep->size > 1024)
3759                limit = 24;
3760        else if (cachep->size > 256)
3761                limit = 54;
3762        else
3763                limit = 120;
3764
3765        /*
3766         * CPU bound tasks (e.g. network routing) can exhibit strongly skewed
3767         * allocation behaviour: most allocs on one cpu, most free operations
3768         * on another cpu. For these cases, efficient object passing between
3769         * cpus is necessary. This is provided by a shared array. The array
3770         * replaces Bonwick's magazine layer.
3771         * On uniprocessor, it's functionally equivalent (but less efficient)
3772         * to a larger limit. Thus disabled by default.
3773         */
3774        shared = 0;
3775        if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3776                shared = 8;
3777
3778#if DEBUG
3779        /*
3780         * With debugging enabled, a large batchcount leads to excessively long
3781         * periods with local interrupts disabled. Limit the batchcount.
3782         */
3783        if (limit > 32)
3784                limit = 32;
3785#endif
3786        batchcount = (limit + 1) / 2;
3787skip_setup:
3788        err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3789        if (err)
3790                printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
3791                       cachep->name, -err);
3792        return err;
3793}
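
/*
 * Worked example of the heuristic above (editor's note, not part of the
 * original file): for a cache with 512-byte objects on an SMP machine
 * without DEBUG, size > 256 gives limit = 54, size <= PAGE_SIZE gives
 * shared = 8, and batchcount becomes (54 + 1) / 2 = 27.  These defaults
 * can later be overridden via slabinfo_write() below (CONFIG_SLABINFO).
 */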
3794
3795/*
3796 * Drain an array if it contains any elements, taking the node lock only if
3797 * necessary. Note that the node list_lock also protects the array_cache
3798 * if drain_array() is used on the shared array.
3799 */
3800static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
3801                         struct array_cache *ac, int force, int node)
3802{
3803        LIST_HEAD(list);
3804        int tofree;
3805
3806        if (!ac || !ac->avail)
3807                return;
3808        if (ac->touched && !force) {
3809                ac->touched = 0;
3810        } else {
3811                spin_lock_irq(&n->list_lock);
3812                if (ac->avail) {
3813                        tofree = force ? ac->avail : (ac->limit + 4) / 5;
3814                        if (tofree > ac->avail)
3815                                tofree = (ac->avail + 1) / 2;
3816                        free_block(cachep, ac->entry, tofree, node, &list);
3817                        ac->avail -= tofree;
3818                        memmove(ac->entry, &(ac->entry[tofree]),
3819                                sizeof(void *) * ac->avail);
3820                }
3821                spin_unlock_irq(&n->list_lock);
3822                slabs_destroy(cachep, &list);
3823        }
3824}
3825
3826/**
3827 * cache_reap - Reclaim memory from caches.
3828 * @w: work descriptor
3829 *
3830 * Called from workqueue/eventd every few seconds.
3831 * Purpose:
3832 * - clear the per-cpu caches for this CPU.
3833 * - return freeable pages to the main free memory pool.
3834 *
3835 * If we cannot acquire the cache chain mutex then just give up - we'll try
3836 * again on the next iteration.
3837 */
3838static void cache_reap(struct work_struct *w)
3839{
3840        struct kmem_cache *searchp;
3841        struct kmem_cache_node *n;
3842        int node = numa_mem_id();
3843        struct delayed_work *work = to_delayed_work(w);
3844
3845        if (!mutex_trylock(&slab_mutex))
3846                /* Give up. Set up the next iteration. */
3847                goto out;
3848
3849        list_for_each_entry(searchp, &slab_caches, list) {
3850                check_irq_on();
3851
3852                /*
3853                 * We only take the node lock if absolutely necessary and we
3854                 * have established with reasonable certainty that
3855                 * we can do some work if the lock was obtained.
3856                 */
3857                n = get_node(searchp, node);
3858
3859                reap_alien(searchp, n);
3860
3861                drain_array(searchp, n, cpu_cache_get(searchp), 0, node);
3862
3863                /*
3864                 * These are racy checks but it does not matter
3865                 * if we skip one check or scan twice.
3866                 */
3867                if (time_after(n->next_reap, jiffies))
3868                        goto next;
3869
3870                n->next_reap = jiffies + REAPTIMEOUT_NODE;
3871
3872                drain_array(searchp, n, n->shared, 0, node);
3873
3874                if (n->free_touched)
3875                        n->free_touched = 0;
3876                else {
3877                        int freed;
3878
3879                        freed = drain_freelist(searchp, n, (n->free_limit +
3880                                5 * searchp->num - 1) / (5 * searchp->num));
3881                        STATS_ADD_REAPED(searchp, freed);
3882                }
3883next:
3884                cond_resched();
3885        }
3886        check_irq_on();
3887        mutex_unlock(&slab_mutex);
3888        next_reap_node();
3889out:
3890        /* Set up the next iteration */
3891        schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
3892}
3893
3894#ifdef CONFIG_SLABINFO
3895void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
3896{
3897        struct page *page;
3898        unsigned long active_objs;
3899        unsigned long num_objs;
3900        unsigned long active_slabs = 0;
3901        unsigned long num_slabs, free_objects = 0, shared_avail = 0;
3902        const char *name;
3903        char *error = NULL;
3904        int node;
3905        struct kmem_cache_node *n;
3906
3907        active_objs = 0;
3908        num_slabs = 0;
3909        for_each_kmem_cache_node(cachep, node, n) {
3910
3911                check_irq_on();
3912                spin_lock_irq(&n->list_lock);
3913
3914                list_for_each_entry(page, &n->slabs_full, lru) {
3915                        if (page->active != cachep->num && !error)
3916                                error = "slabs_full accounting error";
3917                        active_objs += cachep->num;
3918                        active_slabs++;
3919                }
3920                list_for_each_entry(page, &n->slabs_partial, lru) {
3921                        if (page->active == cachep->num && !error)
3922                                error = "slabs_partial accounting error";
3923                        if (!page->active && !error)
3924                                error = "slabs_partial accounting error";
3925                        active_objs += page->active;
3926                        active_slabs++;
3927                }
3928                list_for_each_entry(page, &n->slabs_free, lru) {
3929                        if (page->active && !error)
3930                                error = "slabs_free accounting error";
3931                        num_slabs++;
3932                }
3933                free_objects += n->free_objects;
3934                if (n->shared)
3935                        shared_avail += n->shared->avail;
3936
3937                spin_unlock_irq(&n->list_lock);
3938        }
3939        num_slabs += active_slabs;
3940        num_objs = num_slabs * cachep->num;
3941        if (num_objs - active_objs != free_objects && !error)
3942                error = "free_objects accounting error";
3943
3944        name = cachep->name;
3945        if (error)
3946                printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
3947
3948        sinfo->active_objs = active_objs;
3949        sinfo->num_objs = num_objs;
3950        sinfo->active_slabs = active_slabs;
3951        sinfo->num_slabs = num_slabs;
3952        sinfo->shared_avail = shared_avail;
3953        sinfo->limit = cachep->limit;
3954        sinfo->batchcount = cachep->batchcount;
3955        sinfo->shared = cachep->shared;
3956        sinfo->objects_per_slab = cachep->num;
3957        sinfo->cache_order = cachep->gfporder;
3958}
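
/*
 * Worked example (editor's note, not part of the original file): for a
 * cache with 20 objects per slab holding 3 full slabs, 5 partial slabs
 * with 60 active objects in total, and 2 free slabs, the code above
 * reports active_slabs = 8, num_slabs = 10, num_objs = 200,
 * active_objs = 120, and expects n->free_objects to sum to 80.
 */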
3959
3960void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
3961{
3962#if STATS
3963        {                       /* node stats */
3964                unsigned long high = cachep->high_mark;
3965                unsigned long allocs = cachep->num_allocations;
3966                unsigned long grown = cachep->grown;
3967                unsigned long reaped = cachep->reaped;
3968                unsigned long errors = cachep->errors;
3969                unsigned long max_freeable = cachep->max_freeable;
3970                unsigned long node_allocs = cachep->node_allocs;
3971                unsigned long node_frees = cachep->node_frees;
3972                unsigned long overflows = cachep->node_overflow;
3973
3974                seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
3975                           "%4lu %4lu %4lu %4lu %4lu",
3976                           allocs, high, grown,
3977                           reaped, errors, max_freeable, node_allocs,
3978                           node_frees, overflows);
3979        }
3980        /* cpu stats */
3981        {
3982                unsigned long allochit = atomic_read(&cachep->allochit);
3983                unsigned long allocmiss = atomic_read(&cachep->allocmiss);
3984                unsigned long freehit = atomic_read(&cachep->freehit);
3985                unsigned long freemiss = atomic_read(&cachep->freemiss);
3986
3987                seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
3988                           allochit, allocmiss, freehit, freemiss);
3989        }
3990#endif
3991}
3992
3993#define MAX_SLABINFO_WRITE 128
3994/**
3995 * slabinfo_write - Tuning for the slab allocator
3996 * @file: unused
3997 * @buffer: user buffer
3998 * @count: data length
3999 * @ppos: unused
4000 */
4001ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4002                       size_t count, loff_t *ppos)
4003{
4004        char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4005        int limit, batchcount, shared, res;
4006        struct kmem_cache *cachep;
4007
4008        if (count > MAX_SLABINFO_WRITE)
4009                return -EINVAL;
4010        if (copy_from_user(&kbuf, buffer, count))
4011                return -EFAULT;
4012        kbuf[MAX_SLABINFO_WRITE] = '\0';
4013
4014        tmp = strchr(kbuf, ' ');
4015        if (!tmp)
4016                return -EINVAL;
4017        *tmp = '\0';
4018        tmp++;
4019        if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4020                return -EINVAL;
4021
4022        /* Find the cache in the chain of caches. */
4023        mutex_lock(&slab_mutex);
4024        res = -EINVAL;
4025        list_for_each_entry(cachep, &slab_caches, list) {
4026                if (!strcmp(cachep->name, kbuf)) {
4027                        if (limit < 1 || batchcount < 1 ||
4028                                        batchcount > limit || shared < 0) {
4029                                res = 0;
4030                        } else {
4031                                res = do_tune_cpucache(cachep, limit,
4032                                                       batchcount, shared,
4033                                                       GFP_KERNEL);
4034                        }
4035                        break;
4036                }
4037        }
4038        mutex_unlock(&slab_mutex);
4039        if (res >= 0)
4040                res = count;
4041        return res;
4042}
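
/*
 * Illustrative usage (editor's sketch, not part of the original file): the
 * buffer parsed above has the form "<cache name> <limit> <batchcount>
 * <shared>", so the per-cpu array of a cache named e.g. "dentry" could be
 * retuned from userspace with:
 *
 *	echo "dentry 256 128 8" > /proc/slabinfo
 *
 * Out-of-range tunables (limit < 1, batchcount < 1, batchcount > limit or
 * shared < 0) are accepted but ignored for the matching cache.
 */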
4043
4044#ifdef CONFIG_DEBUG_SLAB_LEAK
4045
4046static void *leaks_start(struct seq_file *m, loff_t *pos)
4047{
4048        mutex_lock(&slab_mutex);
4049        return seq_list_start(&slab_caches, *pos);
4050}
4051
4052static inline int add_caller(unsigned long *n, unsigned long v)
4053{
4054        unsigned long *p;
4055        int l;
4056        if (!v)
4057                return 1;
4058        l = n[1];
4059        p = n + 2;
4060        while (l) {
4061                int i = l/2;
4062                unsigned long *q = p + 2 * i;
4063                if (*q == v) {
4064                        q[1]++;
4065                        return 1;
4066                }
4067                if (*q > v) {
4068                        l = i;
4069                } else {
4070                        p = q + 2;
4071                        l -= i + 1;
4072                }
4073        }
4074        if (++n[1] == n[0])
4075                return 0;
4076        memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4077        p[0] = v;
4078        p[1] = 1;
4079        return 1;
4080}
4081
4082static void handle_slab(unsigned long *n, struct kmem_cache *c,
4083                                                struct page *page)
4084{
4085        void *p;
4086        int i;
4087
4088        if (n[0] == n[1])
4089                return;
4090        for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
4091                if (get_obj_status(page, i) != OBJECT_ACTIVE)
4092                        continue;
4093
4094                if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4095                        return;
4096        }
4097}
4098
4099static void show_symbol(struct seq_file *m, unsigned long address)
4100{
4101#ifdef CONFIG_KALLSYMS
4102        unsigned long offset, size;
4103        char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4104
4105        if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4106                seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4107                if (modname[0])
4108                        seq_printf(m, " [%s]", modname);
4109                return;
4110        }
4111#endif
4112        seq_printf(m, "%p", (void *)address);
4113}
4114
4115static int leaks_show(struct seq_file *m, void *p)
4116{
4117        struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
4118        struct page *page;
4119        struct kmem_cache_node *n;
4120        const char *name;
4121        unsigned long *x = m->private;
4122        int node;
4123        int i;
4124
4125        if (!(cachep->flags & SLAB_STORE_USER))
4126                return 0;
4127        if (!(cachep->flags & SLAB_RED_ZONE))
4128                return 0;
4129
4130        /* OK, we can do it */
4131
4132        x[1] = 0;
4133
4134        for_each_kmem_cache_node(cachep, node, n) {
4135
4136                check_irq_on();
4137                spin_lock_irq(&n->list_lock);
4138
4139                list_for_each_entry(page, &n->slabs_full, lru)
4140                        handle_slab(x, cachep, page);
4141                list_for_each_entry(page, &n->slabs_partial, lru)
4142                        handle_slab(x, cachep, page);
4143                spin_unlock_irq(&n->list_lock);
4144        }
4145        name = cachep->name;
4146        if (x[0] == x[1]) {
4147                /* Increase the buffer size */
4148                mutex_unlock(&slab_mutex);
4149                m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4150                if (!m->private) {
4151                        /* Too bad, we are really out */
4152                        m->private = x;
4153                        mutex_lock(&slab_mutex);
4154                        return -ENOMEM;
4155                }
4156                *(unsigned long *)m->private = x[0] * 2;
4157                kfree(x);
4158                mutex_lock(&slab_mutex);
4159                /* Now make sure this entry will be retried */
4160                m->count = m->size;
4161                return 0;
4162        }
4163        for (i = 0; i < x[1]; i++) {
4164                seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4165                show_symbol(m, x[2*i+2]);
4166                seq_putc(m, '\n');
4167        }
4168
4169        return 0;
4170}
4171
4172static const struct seq_operations slabstats_op = {
4173        .start = leaks_start,
4174        .next = slab_next,
4175        .stop = slab_stop,
4176        .show = leaks_show,
4177};
4178
4179static int slabstats_open(struct inode *inode, struct file *file)
4180{
4181        unsigned long *n;
4182
4183        n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
4184        if (!n)
4185                return -ENOMEM;
4186
4187        *n = PAGE_SIZE / (2 * sizeof(unsigned long));
4188
4189        return 0;
4190}
4191
4192static const struct file_operations proc_slabstats_operations = {
4193        .open           = slabstats_open,
4194        .read           = seq_read,
4195        .llseek         = seq_lseek,
4196        .release        = seq_release_private,
4197};
4198#endif
4199
4200static int __init slab_proc_init(void)
4201{
4202#ifdef CONFIG_DEBUG_SLAB_LEAK
4203        proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4204#endif
4205        return 0;
4206}
4207module_init(slab_proc_init);
4208#endif
4209
4210/**
4211 * ksize - get the actual amount of memory allocated for a given object
4212 * @objp: Pointer to the object
4213 *
4214 * kmalloc may internally round up allocations and return more memory
4215 * than requested. ksize() can be used to determine the actual amount of
4216 * memory allocated. The caller may use this additional memory, even though
4217 * a smaller amount of memory was initially specified with the kmalloc call.
4218 * The caller must guarantee that objp points to a valid object previously
4219 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4220 * must not be freed during the duration of the call.
4221 */
4222size_t ksize(const void *objp)
4223{
4224        BUG_ON(!objp);
4225        if (unlikely(objp == ZERO_SIZE_PTR))
4226                return 0;
4227
4228        return virt_to_cache(objp)->object_size;
4229}
4230EXPORT_SYMBOL(ksize);
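
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * kmalloc() rounds the request up to the backing cache's object size and
 * ksize() reports that rounded size, all of which the caller may use.
 *
 *	char *p = kmalloc(100, GFP_KERNEL);
 *	size_t usable = p ? ksize(p) : 0;	// e.g. 128 via kmalloc-128
 */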
4231