/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *      (c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *      (c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in:
 *      UNIX Internals: The New Frontiers by Uresh Vahalia
 *      Pub: Prentice Hall      ISBN 0-13-101908-2
 * or with a little more detail in:
 *      The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *      Jeff Bonwick (Sun Microsystems).
 *      Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *      are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *      and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *      The global cache-chain is protected by the mutex 'slab_mutex'.
 *      The sem is only needed when accessing/extending the cache-chain, which
 *      can never happen inside an interrupt (kmem_cache_create(),
 *      kmem_cache_shrink() and kmem_cache_reap()).
 *
 *      At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *      Shai Fultheim <shai@scalex86.org>.
 *      Shobhit Dayal <shobhit@calsoftinc.com>
 *      Alok N Kataria <alokk@calsoftinc.com>
 *      Christoph Lameter <christoph@lameter.com>
 *
 *      Modified the slab allocator to be node aware on NUMA systems.
 *      Each node has its own list of partial, free and full slabs.
 *      All object allocations for a node occur from node specific slab lists.
 */

#include        <linux/slab.h>
#include        <linux/mm.h>
#include        <linux/poison.h>
#include        <linux/swap.h>
#include        <linux/cache.h>
#include        <linux/interrupt.h>
#include        <linux/init.h>
#include        <linux/compiler.h>
#include        <linux/cpuset.h>
#include        <linux/proc_fs.h>
#include        <linux/seq_file.h>
#include        <linux/notifier.h>
#include        <linux/kallsyms.h>
#include        <linux/cpu.h>
#include        <linux/sysctl.h>
#include        <linux/module.h>
#include        <linux/rcupdate.h>
#include        <linux/string.h>
#include        <linux/uaccess.h>
#include        <linux/nodemask.h>
#include        <linux/kmemleak.h>
#include        <linux/mempolicy.h>
#include        <linux/mutex.h>
#include        <linux/fault-inject.h>
#include        <linux/rtmutex.h>
#include        <linux/reciprocal_div.h>
#include        <linux/debugobjects.h>
#include        <linux/kmemcheck.h>
#include        <linux/memory.h>
#include        <linux/prefetch.h>

#include        <net/sock.h>

#include        <asm/cacheflush.h>
#include        <asm/tlbflush.h>
#include        <asm/page.h>

#include <trace/events/kmem.h>

#include        "internal.h"

#include        "slab.h"

/*
 * DEBUG        - 1 for kmem_cache_create() to honour: SLAB_RED_ZONE & SLAB_POISON.
 *                0 for faster, smaller code (especially in the critical paths).
 *
 * STATS        - 1 to collect stats for /proc/slabinfo.
 *                0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define DEBUG           1
#define STATS           1
#define FORCED_DEBUG    1
#else
#define DEBUG           0
#define STATS           0
#define FORCED_DEBUG    0
#endif

/* Shouldn't this be in a header file somewhere? */
#define BYTES_PER_WORD          sizeof(void *)
#define REDZONE_ALIGN           max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

/*
 * kmem_bufctl_t:
 *
 * Bufctls are used for linking objs within a slab via linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END      (((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE     (((kmem_bufctl_t)(~0U))-1)
#define BUFCTL_ACTIVE   (((kmem_bufctl_t)(~0U))-2)
#define SLAB_LIMIT      (((kmem_bufctl_t)(~0U))-3)

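/*
 * Worked example (illustrative, not from the original source): in a slab
 * holding four objects where objects 1 and 3 are in use, the kmem_bufctl_t
 * array that follows the struct slab encodes the free list purely with
 * object indices:
 *
 *      slabp->free = 0;                       first free object is #0
 *      slab_bufctl(slabp)[0] = 2;             the free object after #0 is #2
 *      slab_bufctl(slabp)[2] = BUFCTL_END;    end of the free list
 *
 * (slab_bufctl() here stands for whatever helper returns the bufctl array.)
 * Allocation pops the head index from slabp->free; freeing an object pushes
 * its index back onto the head.
 */
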
/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
struct slab_rcu {
        struct rcu_head head;
        struct kmem_cache *cachep;
        void *addr;
};

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
        union {
                struct {
                        struct list_head list;
                        unsigned long colouroff;
                        void *s_mem;            /* including colour offset */
                        unsigned int inuse;     /* num of objs active in slab */
                        kmem_bufctl_t free;
                        unsigned short nodeid;
                };
                struct slab_rcu __slab_cover_slab_rcu;
        };
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 */
struct array_cache {
        unsigned int avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int touched;
        spinlock_t lock;
        void *entry[];  /*
                         * Must have this definition in here for the proper
                         * alignment of array_cache. Also simplifies accessing
                         * the entries.
                         *
                         * Entries should not be dereferenced directly, as
                         * entries belonging to slabs marked pfmemalloc have
                         * their low bit set to SLAB_OBJ_PFMEMALLOC.
                         */
};

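/*
 * Usage sketch (illustrative): entry[] is a LIFO stack indexed by avail,
 * so the most recently freed - and therefore cache-warm - object is handed
 * out first.  Assuming a populated array_cache *ac:
 *
 *      ac->entry[ac->avail++] = objp;     push on free
 *      objp = ac->entry[--ac->avail];     pop on alloc
 *
 * ac_put_obj() and ac_get_obj() below wrap exactly these two operations.
 */
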
#define SLAB_OBJ_PFMEMALLOC     1
static inline bool is_obj_pfmemalloc(void *objp)
{
        return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
        *objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
}

static inline void clear_obj_pfmemalloc(void **objp)
{
        *objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}

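/*
 * Example (sketch): slab objects are at least word-aligned, so bit 0 of a
 * pointer stored in entry[] is free to carry the pfmemalloc marker.  A
 * tagged pointer must be cleared before it is handed out or dereferenced:
 *
 *      void *objp = obj;                      some object pointer
 *      set_obj_pfmemalloc(&objp);             bit 0 is now set
 *      if (is_obj_pfmemalloc(objp))
 *              clear_obj_pfmemalloc(&objp);   original pointer restored
 */
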
/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES   1
struct arraycache_init {
        struct array_cache cache;
        void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        spinlock_t list_lock;
        struct array_cache *shared;     /* shared per node */
        struct array_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define CACHE_CACHE 0
#define SIZE_AC MAX_NUMNODES
#define SIZE_L3 (2 * MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
                        struct kmem_list3 *l3, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
                        int node);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

/*
 * This function must be completely optimized away if a constant is passed to
 * it.  Mostly the same as what is in linux/slab.h except it returns an index.
 */
static __always_inline int index_of(const size_t size)
{
        extern void __bad_size(void);

        if (__builtin_constant_p(size)) {
                int i = 0;

#define CACHE(x) \
        if (size <= x) \
                return i; \
        else \
                i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
                __bad_size();
        } else
                __bad_size();
        return 0;
}

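/*
 * Example: for a compile-time constant size the CACHE() expansion above
 * unrolls into a chain of comparisons that the compiler folds to a single
 * constant.  With a conventional size ladder (e.g. 32, 64, 96, 128, ... on
 * common configs) it behaves as if written:
 *
 *      if (size <= 32) return 0; else i++;
 *      if (size <= 64) return 1; else i++;
 *      ...
 *
 * A non-constant size falls through to the undefined __bad_size(), turning
 * misuse into a link-time error rather than a runtime one.
 */
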
static int slab_early_init = 1;

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

static void kmem_list3_init(struct kmem_list3 *parent)
{
        INIT_LIST_HEAD(&parent->slabs_full);
        INIT_LIST_HEAD(&parent->slabs_partial);
        INIT_LIST_HEAD(&parent->slabs_free);
        parent->shared = NULL;
        parent->alien = NULL;
        parent->colour_next = 0;
        spin_lock_init(&parent->list_lock);
        parent->free_objects = 0;
        parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)                          \
        do {                                                            \
                INIT_LIST_HEAD(listp);                                  \
                list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
        } while (0)

#define MAKE_ALL_LISTS(cachep, ptr, nodeid)                             \
        do {                                                            \
        MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);  \
        MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
        MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);  \
        } while (0)

#define CFLGS_OFF_SLAB          (0x80000000UL)
#define OFF_SLAB(x)     ((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT       16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC        (2*HZ)
#define REAPTIMEOUT_LIST3       (4*HZ)

#if STATS
#define STATS_INC_ACTIVE(x)     ((x)->num_active++)
#define STATS_DEC_ACTIVE(x)     ((x)->num_active--)
#define STATS_INC_ALLOCED(x)    ((x)->num_allocations++)
#define STATS_INC_GROWN(x)      ((x)->grown++)
#define STATS_ADD_REAPED(x,y)   ((x)->reaped += (y))
#define STATS_SET_HIGH(x)                                               \
        do {                                                            \
                if ((x)->num_active > (x)->high_mark)                   \
                        (x)->high_mark = (x)->num_active;               \
        } while (0)
#define STATS_INC_ERR(x)        ((x)->errors++)
#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
#define STATS_INC_NODEFREES(x)  ((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define STATS_SET_FREEABLE(x, i)                                        \
        do {                                                            \
                if ((x)->max_freeable < i)                              \
                        (x)->max_freeable = i;                          \
        } while (0)
#define STATS_INC_ALLOCHIT(x)   atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)  atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)    atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)   atomic_inc(&(x)->freemiss)
#else
#define STATS_INC_ACTIVE(x)     do { } while (0)
#define STATS_DEC_ACTIVE(x)     do { } while (0)
#define STATS_INC_ALLOCED(x)    do { } while (0)
#define STATS_INC_GROWN(x)      do { } while (0)
#define STATS_ADD_REAPED(x,y)   do { (void)(y); } while (0)
#define STATS_SET_HIGH(x)       do { } while (0)
#define STATS_INC_ERR(x)        do { } while (0)
#define STATS_INC_NODEALLOCS(x) do { } while (0)
#define STATS_INC_NODEFREES(x)  do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)   do { } while (0)
#define STATS_INC_ALLOCMISS(x)  do { } while (0)
#define STATS_INC_FREEHIT(x)    do { } while (0)
#define STATS_INC_FREEMISS(x)   do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0            : objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 *              the end of an object is aligned with the end of the real
 *              allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 *              redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2 * BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1 * BYTES_PER_WORD: last caller address
 *                                      [BYTES_PER_WORD long]
 */
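/*
 * Worked example (illustrative, assuming BYTES_PER_WORD == 8, a 16-byte
 * object and SLAB_RED_ZONE | SLAB_STORE_USER):
 *
 *      offset  0: redzone word 1
 *      offset  8: the object itself       (obj_offset == 8)
 *      offset 24: redzone word 2
 *      offset 32: last caller address
 *
 * so cachep->size ends up as 40 bytes before any cache-line alignment.
 */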
static int obj_offset(struct kmem_cache *cachep)
{
        return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
        BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
        return (unsigned long long *)(objp + obj_offset(cachep) -
                                      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
        BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
        if (cachep->flags & SLAB_STORE_USER)
                return (unsigned long long *)(objp + cachep->size -
                                              sizeof(unsigned long long) -
                                              REDZONE_ALIGN);
        return (unsigned long long *)(objp + cachep->size -
                                      sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
        BUG_ON(!(cachep->flags & SLAB_STORE_USER));
        return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)                   0
#define dbg_redzone1(cachep, objp)      ({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)      ({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)      ({BUG(); (void **)NULL;})

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define SLAB_MAX_ORDER_HI       1
#define SLAB_MAX_ORDER_LO       0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
        struct page *page = virt_to_head_page(obj);
        return page->slab_cache;
}

static inline struct slab *virt_to_slab(const void *obj)
{
        struct page *page = virt_to_head_page(obj);

        VM_BUG_ON(!PageSlab(page));
        return page->slab_page;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
                                 unsigned int idx)
{
        return slab->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
                                        const struct slab *slab, void *obj)
{
        u32 offset = (obj - slab->s_mem);
        return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}

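/*
 * Example (sketch): reciprocal_buffer_size is assumed to have been set up
 * once as reciprocal_value(cache->size), after which the index is computed
 * without a hardware divide.  For cache->size == 256 and an object starting
 * 1280 bytes into s_mem:
 *
 *      offset = obj - slab->s_mem;                              1280
 *      idx = reciprocal_divide(offset,
 *                              cache->reciprocal_buffer_size);  == 5
 *
 * obj_to_index() and index_to_obj() above are exact inverses.
 */
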
/*
 * These are the default caches for kmalloc. Custom caches can have other sizes.
 */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
        CACHE(ULONG_MAX)
#undef CACHE
};
EXPORT_SYMBOL(malloc_sizes);

/* Must match cache_sizes above. Out of line to keep cache footprint low. */
struct cache_names {
        char *name;
        char *name_dma;
};

static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
        {NULL,}
#undef CACHE
};

static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
        .batchcount = 1,
        .limit = BOOT_CPUCACHE_ENTRIES,
        .shared = 1,
        .size = sizeof(struct kmem_cache),
        .name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node go down and
 * then come back up during hotplug.
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static struct lock_class_key debugobj_l3_key;
static struct lock_class_key debugobj_alc_key;

static void slab_set_lock_classes(struct kmem_cache *cachep,
                struct lock_class_key *l3_key, struct lock_class_key *alc_key,
                int q)
{
        struct array_cache **alc;
        struct kmem_list3 *l3;
        int r;

        l3 = cachep->nodelists[q];
        if (!l3)
                return;

        lockdep_set_class(&l3->list_lock, l3_key);
        alc = l3->alien;
        /*
         * FIXME: This check for BAD_ALIEN_MAGIC
         * should go away when common slab code is taught to
         * work even without alien caches.
         * Currently, non-NUMA code returns BAD_ALIEN_MAGIC
         * for alloc_alien_cache().
         */
        if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
                return;
        for_each_node(r) {
                if (alc[r])
                        lockdep_set_class(&alc[r]->lock, alc_key);
        }
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
        slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
        int node;

        for_each_online_node(node)
                slab_set_debugobj_lock_classes_node(cachep, node);
}

static void init_node_lock_keys(int q)
{
        struct cache_sizes *s = malloc_sizes;

        if (slab_state < UP)
                return;

        for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
                struct kmem_list3 *l3;

                l3 = s->cs_cachep->nodelists[q];
                if (!l3 || OFF_SLAB(s->cs_cachep))
                        continue;

                slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
                                &on_slab_alc_key, q);
        }
}

static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
{
        struct kmem_list3 *l3;

        l3 = cachep->nodelists[q];
        if (!l3)
                return;

        slab_set_lock_classes(cachep, &on_slab_l3_key,
                        &on_slab_alc_key, q);
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
        int node;

        VM_BUG_ON(OFF_SLAB(cachep));
        for_each_node(node)
                on_slab_lock_classes_node(cachep, node);
}

static inline void init_lock_keys(void)
{
        int node;

        for_each_node(node)
                init_node_lock_keys(node);
}
#else
static void init_node_lock_keys(int q)
{
}

static inline void init_lock_keys(void)
{
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
}

static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
}
#endif

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
        return cachep->array[smp_processor_id()];
}

static inline struct kmem_cache *__find_general_cachep(size_t size,
                                                        gfp_t gfpflags)
{
        struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
        /* This happens if someone tries to call
         * kmem_cache_create(), or __kmalloc(), before
         * the generic caches are initialized.
         */
        BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
        if (!size)
                return ZERO_SIZE_PTR;

        while (size > csizep->cs_size)
                csizep++;

        /*
         * Really subtle: The last entry with cs->cs_size==ULONG_MAX
         * has cs_{dma,}cachep==NULL. Thus no special case
         * for large kmalloc calls required.
         */
#ifdef CONFIG_ZONE_DMA
        if (unlikely(gfpflags & GFP_DMA))
                return csizep->cs_dmacachep;
#endif
        return csizep->cs_cachep;
}

static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
        return __find_general_cachep(size, gfpflags);
}

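/*
 * Example: with the conventional size ladder, kmalloc(100, GFP_KERNEL)
 * walks csizep forward until cs_size >= 100 and ends up in the "size-128"
 * cache; the same request with GFP_DMA (and CONFIG_ZONE_DMA) returns the
 * matching "size-128(DMA)" cache instead.
 */
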
static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
        return ALIGN(sizeof(struct slab) + nr_objs * sizeof(kmem_bufctl_t), align);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
                           size_t align, int flags, size_t *left_over,
                           unsigned int *num)
{
        int nr_objs;
        size_t mgmt_size;
        size_t slab_size = PAGE_SIZE << gfporder;

        /*
         * The slab management structure can be either off the slab or
         * on it. For the latter case, the memory allocated for a
         * slab is used for:
         *
         * - The struct slab
         * - One kmem_bufctl_t for each object
         * - Padding to respect alignment of @align
         * - @buffer_size bytes for each object
         *
         * If the slab management structure is off the slab, then the
         * alignment will already be calculated into the size. Because
         * the slabs are all pages aligned, the objects will be at the
         * correct alignment when allocated.
         */
        if (flags & CFLGS_OFF_SLAB) {
                mgmt_size = 0;
                nr_objs = slab_size / buffer_size;

                if (nr_objs > SLAB_LIMIT)
                        nr_objs = SLAB_LIMIT;
        } else {
                /*
                 * Ignore padding for the initial guess. The padding
                 * is at most @align-1 bytes, and @buffer_size is at
                 * least @align. In the worst case, this result will
                 * be one greater than the number of objects that fit
                 * into the memory allocation when taking the padding
                 * into account.
                 */
                nr_objs = (slab_size - sizeof(struct slab)) /
                          (buffer_size + sizeof(kmem_bufctl_t));

                /*
                 * This calculated number will be either the right
                 * amount, or one greater than what we want.
                 */
                if (slab_mgmt_size(nr_objs, align) + nr_objs * buffer_size
                       > slab_size)
                        nr_objs--;

                if (nr_objs > SLAB_LIMIT)
                        nr_objs = SLAB_LIMIT;

                mgmt_size = slab_mgmt_size(nr_objs, align);
        }
        *num = nr_objs;
        *left_over = slab_size - nr_objs * buffer_size - mgmt_size;
}

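/*
 * Worked example (illustrative, assuming an on-slab management structure,
 * gfporder == 0, PAGE_SIZE == 4096, buffer_size == 256, align == 8,
 * sizeof(struct slab) == 64 and a 4-byte kmem_bufctl_t):
 *
 *      initial guess: (4096 - 64) / (256 + 4)        = 15 objects
 *      mgmt_size:     ALIGN(64 + 15 * 4, 8)          = 128
 *      check:         128 + 15 * 256 = 3968 <= 4096  -> guess stands
 *      *left_over:    4096 - 3840 - 128              = 128 bytes
 */
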
#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
                        char *msg)
{
        printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
               function, cachep->name, msg);
        dump_stack();
        add_taint(TAINT_BAD_PAGE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using a fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line.
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
        use_alien_caches = 0;
        return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
        get_option(&str, &slab_max_order);
        slab_max_order = slab_max_order < 0 ? 0 :
                                min(slab_max_order, MAX_ORDER - 1);
        slab_max_order_set = true;

        return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

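/*
 * Usage example: booting with "slab_max_order=2" permits slabs of up to
 * four contiguous pages; negative values are clamped to 0 and anything
 * above MAX_ORDER - 1 is capped by the min() above.
 */
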
#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
        int node;

        node = next_node(cpu_to_mem(cpu), node_online_map);
        if (node == MAX_NUMNODES)
                node = first_node(node_online_map);

        per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
        int node = __this_cpu_read(slab_reap_node);

        node = next_node(node, node_online_map);
        if (unlikely(node >= MAX_NUMNODES))
                node = first_node(node_online_map);
        __this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __cpuinit start_cpu_timer(int cpu)
{
        struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

        /*
         * When this gets called from do_initcalls via cpucache_init(),
         * init_workqueues() has already run, so keventd will be setup
         * at that time.
         */
        if (keventd_up() && reap_work->work.func == NULL) {
                init_reap_node(cpu);
                INIT_DEFERRABLE_WORK(reap_work, cache_reap);
                schedule_delayed_work_on(cpu, reap_work,
                                        __round_jiffies_relative(HZ, cpu));
        }
}

static struct array_cache *alloc_arraycache(int node, int entries,
                                            int batchcount, gfp_t gfp)
{
        int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
        struct array_cache *nc = NULL;

        nc = kmalloc_node(memsize, gfp, node);
        /*
         * The array_cache structures contain pointers to free objects.
         * However, when such objects are allocated or transferred to another
         * cache the pointers are not cleared and they could be counted as
         * valid references during a kmemleak scan. Therefore, kmemleak must
         * not scan such objects.
         */
        kmemleak_no_scan(nc);
        if (nc) {
                nc->avail = 0;
                nc->limit = entries;
                nc->batchcount = batchcount;
                nc->touched = 0;
                spin_lock_init(&nc->lock);
        }
        return nc;
}

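/*
 * Usage sketch (mirrors cpuup_prepare() further down): a caller sizes the
 * head array from the cache's tunables and must handle failure itself:
 *
 *      struct array_cache *nc;
 *
 *      nc = alloc_arraycache(node, cachep->limit, cachep->batchcount,
 *                            GFP_KERNEL);
 *      if (!nc)
 *              return -ENOMEM;
 */
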
static inline bool is_slab_pfmemalloc(struct slab *slabp)
{
        struct page *page = virt_to_page(slabp->s_mem);

        return PageSlabPfmemalloc(page);
}

/* Clears pfmemalloc_active if no slabs have pfmemalloc set */
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
                                                struct array_cache *ac)
{
        struct kmem_list3 *l3 = cachep->nodelists[numa_mem_id()];
        struct slab *slabp;
        unsigned long flags;

        if (!pfmemalloc_active)
                return;

        spin_lock_irqsave(&l3->list_lock, flags);
        list_for_each_entry(slabp, &l3->slabs_full, list)
                if (is_slab_pfmemalloc(slabp))
                        goto out;

        list_for_each_entry(slabp, &l3->slabs_partial, list)
                if (is_slab_pfmemalloc(slabp))
                        goto out;

        list_for_each_entry(slabp, &l3->slabs_free, list)
                if (is_slab_pfmemalloc(slabp))
                        goto out;

        pfmemalloc_active = false;
out:
        spin_unlock_irqrestore(&l3->list_lock, flags);
}

static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
                                                gfp_t flags, bool force_refill)
{
        int i;
        void *objp = ac->entry[--ac->avail];

        /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
        if (unlikely(is_obj_pfmemalloc(objp))) {
                struct kmem_list3 *l3;

                if (gfp_pfmemalloc_allowed(flags)) {
                        clear_obj_pfmemalloc(&objp);
                        return objp;
                }

                /* The caller cannot use PFMEMALLOC objects, find another one */
                for (i = 0; i < ac->avail; i++) {
                        /* If a !PFMEMALLOC object is found, swap them */
                        if (!is_obj_pfmemalloc(ac->entry[i])) {
                                objp = ac->entry[i];
                                ac->entry[i] = ac->entry[ac->avail];
                                ac->entry[ac->avail] = objp;
                                return objp;
                        }
                }

                /*
                 * If there are empty slabs on the slabs_free list and we are
                 * being forced to refill the cache, mark this one !pfmemalloc.
                 */
                l3 = cachep->nodelists[numa_mem_id()];
                if (!list_empty(&l3->slabs_free) && force_refill) {
                        struct slab *slabp = virt_to_slab(objp);
                        ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
                        clear_obj_pfmemalloc(&objp);
                        recheck_pfmemalloc_active(cachep, ac);
                        return objp;
                }

                /* No !PFMEMALLOC objects available */
                ac->avail++;
                objp = NULL;
        }

        return objp;
}

static inline void *ac_get_obj(struct kmem_cache *cachep,
                        struct array_cache *ac, gfp_t flags, bool force_refill)
{
        void *objp;

        if (unlikely(sk_memalloc_socks()))
                objp = __ac_get_obj(cachep, ac, flags, force_refill);
        else
                objp = ac->entry[--ac->avail];

        return objp;
}

static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
                                                                void *objp)
{
        if (unlikely(pfmemalloc_active)) {
                /* Some pfmemalloc slabs exist, check if this is one */
                struct page *page = virt_to_head_page(objp);
                if (PageSlabPfmemalloc(page))
                        set_obj_pfmemalloc(&objp);
        }

        return objp;
}

static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
                                                                void *objp)
{
        if (unlikely(sk_memalloc_socks()))
                objp = __ac_put_obj(cachep, ac, objp);

        ac->entry[ac->avail++] = objp;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
                struct array_cache *from, unsigned int max)
{
        /* Figure out how many entries to transfer */
        int nr = min3(from->avail, max, to->limit - to->avail);

        if (!nr)
                return 0;

        memcpy(to->entry + to->avail, from->entry + from->avail - nr,
                        sizeof(void *) * nr);

        from->avail -= nr;
        to->avail += nr;
        return nr;
}

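/*
 * Worked example: if from->avail == 30, max == 16 and the destination has
 * room for 10 more entries (to->limit - to->avail == 10), then nr == 10.
 * The 10 most recently pushed pointers - the tail of from->entry[] - are
 * copied across and both avail counters are adjusted.
 */
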
#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
        return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
        return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
                gfp_t flags)
{
        return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
                 gfp_t flags, int nodeid)
{
        return NULL;
}

#else   /* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
        struct array_cache **ac_ptr;
        int memsize = sizeof(void *) * nr_node_ids;
        int i;

        if (limit > 1)
                limit = 12;
        ac_ptr = kzalloc_node(memsize, gfp, node);
        if (ac_ptr) {
                for_each_node(i) {
                        if (i == node || !node_online(i))
                                continue;
                        ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
                        if (!ac_ptr[i]) {
                                for (i--; i >= 0; i--)
                                        kfree(ac_ptr[i]);
                                kfree(ac_ptr);
                                return NULL;
                        }
                }
        }
        return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
        int i;

        if (!ac_ptr)
                return;
        for_each_node(i)
                kfree(ac_ptr[i]);
        kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
                                struct array_cache *ac, int node)
{
        struct kmem_list3 *rl3 = cachep->nodelists[node];

        if (ac->avail) {
                spin_lock(&rl3->list_lock);
                /*
                 * Stuff objects into the remote node's shared array first.
                 * That way we can avoid the overhead of putting the objects
                 * into the free lists and getting them back later.
                 */
                if (rl3->shared)
                        transfer_objects(rl3->shared, ac, ac->limit);

                free_block(cachep, ac->entry, ac->avail, node);
                ac->avail = 0;
                spin_unlock(&rl3->list_lock);
        }
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
        int node = __this_cpu_read(slab_reap_node);

        if (l3->alien) {
                struct array_cache *ac = l3->alien[node];

                if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
                        __drain_alien_cache(cachep, ac, node);
                        spin_unlock_irq(&ac->lock);
                }
        }
}

static void drain_alien_cache(struct kmem_cache *cachep,
                                struct array_cache **alien)
{
        int i = 0;
        struct array_cache *ac;
        unsigned long flags;

        for_each_online_node(i) {
                ac = alien[i];
                if (ac) {
                        spin_lock_irqsave(&ac->lock, flags);
                        __drain_alien_cache(cachep, ac, i);
                        spin_unlock_irqrestore(&ac->lock, flags);
                }
        }
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
        struct slab *slabp = virt_to_slab(objp);
        int nodeid = slabp->nodeid;
        struct kmem_list3 *l3;
        struct array_cache *alien = NULL;
        int node;

        node = numa_mem_id();

        /*
         * Make sure we are not freeing an object from another node to the
         * array cache on this cpu.
         */
        if (likely(slabp->nodeid == node))
                return 0;

        l3 = cachep->nodelists[node];
        STATS_INC_NODEFREES(cachep);
        if (l3->alien && l3->alien[nodeid]) {
                alien = l3->alien[nodeid];
                spin_lock(&alien->lock);
                if (unlikely(alien->avail == alien->limit)) {
                        STATS_INC_ACOVERFLOW(cachep);
                        __drain_alien_cache(cachep, alien, nodeid);
                }
                ac_put_obj(cachep, alien, objp);
                spin_unlock(&alien->lock);
        } else {
                spin_lock(&(cachep->nodelists[nodeid])->list_lock);
                free_block(cachep, &objp, 1, nodeid);
                spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
        }
        return 1;
}
#endif

/*
 * Allocates and initializes nodelists for a node on each slab cache, used for
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_list3
 * will be allocated off-node since memory is not yet online for the new node.
 * When hotplugging memory or a cpu, existing nodelists are not replaced if
 * already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_nodelists_node(int node)
{
        struct kmem_cache *cachep;
        struct kmem_list3 *l3;
        const int memsize = sizeof(struct kmem_list3);

        list_for_each_entry(cachep, &slab_caches, list) {
                /*
                 * Set up the kmem_list3 for this node before we can
                 * begin anything. Make sure some other cpu on this
                 * node has not already allocated it.
                 */
                if (!cachep->nodelists[node]) {
                        l3 = kmalloc_node(memsize, GFP_KERNEL, node);
                        if (!l3)
                                return -ENOMEM;
                        kmem_list3_init(l3);
                        l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
                            ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

                        /*
                         * The l3s don't come and go as CPUs come and
                         * go.  slab_mutex is sufficient
                         * protection here.
                         */
                        cachep->nodelists[node] = l3;
                }

                spin_lock_irq(&cachep->nodelists[node]->list_lock);
                cachep->nodelists[node]->free_limit =
                        (1 + nr_cpus_node(node)) *
                        cachep->batchcount + cachep->num;
                spin_unlock_irq(&cachep->nodelists[node]->list_lock);
        }
        return 0;
}

static void __cpuinit cpuup_canceled(long cpu)
{
        struct kmem_cache *cachep;
        struct kmem_list3 *l3 = NULL;
        int node = cpu_to_mem(cpu);
        const struct cpumask *mask = cpumask_of_node(node);

        list_for_each_entry(cachep, &slab_caches, list) {
                struct array_cache *nc;
                struct array_cache *shared;
                struct array_cache **alien;

                /* cpu is dead; no one can alloc from it. */
                nc = cachep->array[cpu];
                cachep->array[cpu] = NULL;
                l3 = cachep->nodelists[node];

                if (!l3)
                        goto free_array_cache;

                spin_lock_irq(&l3->list_lock);

                /* Free limit for this kmem_list3 */
                l3->free_limit -= cachep->batchcount;
                if (nc)
                        free_block(cachep, nc->entry, nc->avail, node);

                if (!cpumask_empty(mask)) {
                        spin_unlock_irq(&l3->list_lock);
                        goto free_array_cache;
                }

                shared = l3->shared;
                if (shared) {
                        free_block(cachep, shared->entry,
                                   shared->avail, node);
                        l3->shared = NULL;
                }

                alien = l3->alien;
                l3->alien = NULL;

                spin_unlock_irq(&l3->list_lock);

                kfree(shared);
                if (alien) {
                        drain_alien_cache(cachep, alien);
                        free_alien_cache(alien);
                }
free_array_cache:
                kfree(nc);
        }
        /*
         * In the previous loop, all the objects were freed to
         * the respective cache's slabs, now we can go ahead and
         * shrink each nodelist to its limit.
         */
        list_for_each_entry(cachep, &slab_caches, list) {
                l3 = cachep->nodelists[node];
                if (!l3)
                        continue;
                drain_freelist(cachep, l3, l3->free_objects);
        }
}

static int __cpuinit cpuup_prepare(long cpu)
{
        struct kmem_cache *cachep;
        struct kmem_list3 *l3 = NULL;
        int node = cpu_to_mem(cpu);
        int err;

        /*
         * We need to do this right in the beginning since
         * alloc_arraycache's are going to use this list.
         * kmalloc_node allows us to add the slab to the right
         * kmem_list3 and not this cpu's kmem_list3
         */
        err = init_cache_nodelists_node(node);
        if (err < 0)
                goto bad;

        /*
         * Now we can go ahead with allocating the shared arrays and
         * array caches
         */
        list_for_each_entry(cachep, &slab_caches, list) {
                struct array_cache *nc;
                struct array_cache *shared = NULL;
                struct array_cache **alien = NULL;

                nc = alloc_arraycache(node, cachep->limit,
                                        cachep->batchcount, GFP_KERNEL);
                if (!nc)
                        goto bad;
                if (cachep->shared) {
                        shared = alloc_arraycache(node,
                                cachep->shared * cachep->batchcount,
                                0xbaadf00d, GFP_KERNEL);
                        if (!shared) {
                                kfree(nc);
                                goto bad;
                        }
                }
                if (use_alien_caches) {
                        alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
                        if (!alien) {
                                kfree(shared);
                                kfree(nc);
                                goto bad;
                        }
                }
                cachep->array[cpu] = nc;
                l3 = cachep->nodelists[node];
                BUG_ON(!l3);

                spin_lock_irq(&l3->list_lock);
                if (!l3->shared) {
                        /*
                         * We are serialised from CPU_DEAD or
                         * CPU_UP_CANCELLED by the cpucontrol lock
                         */
                        l3->shared = shared;
                        shared = NULL;
                }
#ifdef CONFIG_NUMA
                if (!l3->alien) {
                        l3->alien = alien;
                        alien = NULL;
                }
#endif
                spin_unlock_irq(&l3->list_lock);
                kfree(shared);
                free_alien_cache(alien);
                if (cachep->flags & SLAB_DEBUG_OBJECTS)
                        slab_set_debugobj_lock_classes_node(cachep, node);
                else if (!OFF_SLAB(cachep) &&
                         !(cachep->flags & SLAB_DESTROY_BY_RCU))
                        on_slab_lock_classes_node(cachep, node);
        }
        init_node_lock_keys(node);

        return 0;
bad:
        cpuup_canceled(cpu);
        return -ENOMEM;
}

static int __cpuinit cpuup_callback(struct notifier_block *nfb,
                                    unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        int err = 0;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                mutex_lock(&slab_mutex);
                err = cpuup_prepare(cpu);
                mutex_unlock(&slab_mutex);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                start_cpu_timer(cpu);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                /*
                 * Shutdown cache reaper. Note that the slab_mutex is
                 * held so that if cache_reap() is invoked it cannot do
                 * anything expensive but will only modify reap_work
                 * and reschedule the timer.
                 */
                cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
                /* Now the cache_reaper is guaranteed to be not running. */
                per_cpu(slab_reap_work, cpu).work.func = NULL;
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                start_cpu_timer(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                /*
                 * Even if all the cpus of a node are down, we don't free the
                 * kmem_list3 of any cache. This is to avoid a race between
                 * cpu_down, and a kmalloc allocation from another cpu for
                 * memory from the node of the cpu going down.  The list3
                 * structure is usually allocated from kmem_cache_create() and
                 * gets destroyed at kmem_cache_destroy().
                 */
1473                /* fall through */
1474#endif
1475        case CPU_UP_CANCELED:
1476        case CPU_UP_CANCELED_FROZEN:
1477                mutex_lock(&slab_mutex);
1478                cpuup_canceled(cpu);
1479                mutex_unlock(&slab_mutex);
1480                break;
1481        }
1482        return notifier_from_errno(err);
1483}
1484
1485static struct notifier_block __cpuinitdata cpucache_notifier = {
1486        &cpuup_callback, NULL, 0
1487};
1488
1489#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1490/*
1491 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1492 * Returns -EBUSY if all objects cannot be drained so that the node is not
1493 * removed.
1494 *
1495 * Must hold slab_mutex.
1496 */
1497static int __meminit drain_cache_nodelists_node(int node)
1498{
1499        struct kmem_cache *cachep;
1500        int ret = 0;
1501
1502        list_for_each_entry(cachep, &slab_caches, list) {
1503                struct kmem_list3 *l3;
1504
1505                l3 = cachep->nodelists[node];
1506                if (!l3)
1507                        continue;
1508
1509                drain_freelist(cachep, l3, l3->free_objects);
1510
1511                if (!list_empty(&l3->slabs_full) ||
1512                    !list_empty(&l3->slabs_partial)) {
1513                        ret = -EBUSY;
1514                        break;
1515                }
1516        }
1517        return ret;
1518}
1519
1520static int __meminit slab_memory_callback(struct notifier_block *self,
1521                                        unsigned long action, void *arg)
1522{
1523        struct memory_notify *mnb = arg;
1524        int ret = 0;
1525        int nid;
1526
1527        nid = mnb->status_change_nid;
1528        if (nid < 0)
1529                goto out;
1530
1531        switch (action) {
1532        case MEM_GOING_ONLINE:
1533                mutex_lock(&slab_mutex);
1534                ret = init_cache_nodelists_node(nid);
1535                mutex_unlock(&slab_mutex);
1536                break;
1537        case MEM_GOING_OFFLINE:
1538                mutex_lock(&slab_mutex);
1539                ret = drain_cache_nodelists_node(nid);
1540                mutex_unlock(&slab_mutex);
1541                break;
1542        case MEM_ONLINE:
1543        case MEM_OFFLINE:
1544        case MEM_CANCEL_ONLINE:
1545        case MEM_CANCEL_OFFLINE:
1546                break;
1547        }
1548out:
1549        return notifier_from_errno(ret);
1550}
1551#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
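/*
 * Sketch of the memory hot-remove handshake handled above (the notifier
 * event names are real; the sequencing description is a summary):
 *
 *	MEM_GOING_OFFLINE -> drain_cache_nodelists_node(nid)
 *		returns 0      -> offlining proceeds, then MEM_OFFLINE
 *		returns -EBUSY -> offlining is aborted and the hotplug core
 *				  sends MEM_CANCEL_OFFLINE instead
 */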
1552
1553/*
1554 * Swap the static kmem_list3 with kmalloc'ed memory.
1555 */
1556static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1557                                int nodeid)
1558{
1559        struct kmem_list3 *ptr;
1560
1561        ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
1562        BUG_ON(!ptr);
1563
1564        memcpy(ptr, list, sizeof(struct kmem_list3));
1565        /*
1566         * Do not assume that spinlocks can be initialized via memcpy:
1567         */
1568        spin_lock_init(&ptr->list_lock);
1569
1570        MAKE_ALL_LISTS(cachep, ptr, nodeid);
1571        cachep->nodelists[nodeid] = ptr;
1572}
1573
1574/*
1575 * Set up all the kmem_list3s for a cache whose buffer_size is the same as
1576 * the size of kmem_list3.
1577 */
1578static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1579{
1580        int node;
1581
1582        for_each_online_node(node) {
1583                cachep->nodelists[node] = &initkmem_list3[index + node];
1584                cachep->nodelists[node]->next_reap = jiffies +
1585                    REAPTIMEOUT_LIST3 +
1586                    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1587        }
1588}
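/*
 * The "((unsigned long)cachep) % REAPTIMEOUT_LIST3" term above uses the
 * cache pointer as a cheap per-cache hash: each cache gets a next_reap
 * value spread across the REAPTIMEOUT_LIST3 window, so cache_reap() work
 * is staggered instead of every cache being reaped in the same tick.
 */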
1589
1590/*
1591 * The memory after the last cpu cache pointer is used for
1592 * the nodelists pointer.
1593 */
1594static void setup_nodelists_pointer(struct kmem_cache *cachep)
1595{
1596        cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
1597}
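/*
 * Layout sketch (assuming, for illustration, nr_cpu_ids == 4 and
 * nr_node_ids == 2): the boot cache for struct kmem_cache is sized as
 *
 *	offsetof(struct kmem_cache, array[4]) +
 *		2 * sizeof(struct kmem_list3 *)
 *
 * so the per-cpu array pointers are followed immediately by the per-node
 * list pointers, and nodelists simply aliases that tail memory.  This is
 * exactly the size passed to create_boot_cache() in kmem_cache_init().
 */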
1598
1599/*
1600 * Initialisation.  Called after the page allocator has been initialised and
1601 * before smp_init().
1602 */
1603void __init kmem_cache_init(void)
1604{
1605        struct cache_sizes *sizes;
1606        struct cache_names *names;
1607        int i;
1608
1609        kmem_cache = &kmem_cache_boot;
1610        setup_nodelists_pointer(kmem_cache);
1611
1612        if (num_possible_nodes() == 1)
1613                use_alien_caches = 0;
1614
1615        for (i = 0; i < NUM_INIT_LISTS; i++)
1616                kmem_list3_init(&initkmem_list3[i]);
1617
1618        set_up_list3s(kmem_cache, CACHE_CACHE);
1619
1620        /*
1621         * Fragmentation resistance on low memory - only use bigger
1622         * page orders on machines with more than 32MB of memory if
1623         * not overridden on the command line.
1624         */
1625        if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1626                slab_max_order = SLAB_MAX_ORDER_HI;
1627
1628        /* Bootstrap is tricky, because several objects are allocated
1629         * from caches that do not exist yet:
1630         * 1) initialize the kmem_cache cache: it contains the struct
1631         *    kmem_cache structures of all caches, except kmem_cache itself:
1632         *    kmem_cache is statically allocated.
1633         *    Initially an __init data area is used for the head array and the
1634         *    kmem_list3 structures; these are replaced with kmalloc allocated
1635         *    memory at the end of the bootstrap.
1636         * 2) Create the first kmalloc cache.
1637         *    The struct kmem_cache for the new cache is allocated normally.
1638         *    An __init data area is used for the head array.
1639         * 3) Create the remaining kmalloc caches, with minimally sized
1640         *    head arrays.
1641         * 4) Replace the __init data head arrays for kmem_cache and the first
1642         *    kmalloc cache with kmalloc allocated arrays.
1643         * 5) Replace the __init data for kmem_list3 for kmem_cache and
1644 *    the other caches with kmalloc allocated memory.
1645         * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1646         */
1647
1648        /* 1) create the kmem_cache */
1649
1650        /*
1651         * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1652         */
1653        create_boot_cache(kmem_cache, "kmem_cache",
1654                offsetof(struct kmem_cache, array[nr_cpu_ids]) +
1655                                  nr_node_ids * sizeof(struct kmem_list3 *),
1656                                  SLAB_HWCACHE_ALIGN);
1657        list_add(&kmem_cache->list, &slab_caches);
1658
1659        /* 2+3) create the kmalloc caches */
1660        sizes = malloc_sizes;
1661        names = cache_names;
1662
1663        /*
1664         * Initialize the caches that provide memory for the array cache and the
1665         * kmem_list3 structures first.  Without this, further allocations will
1666         * BUG().
1667         */
1668
1669        sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
1670                                        sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
1671
1672        if (INDEX_AC != INDEX_L3)
1673                sizes[INDEX_L3].cs_cachep =
1674                        create_kmalloc_cache(names[INDEX_L3].name,
1675                                sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
1676
1677        slab_early_init = 0;
1678
1679        while (sizes->cs_size != ULONG_MAX) {
1680                /*
1681                 * For performance, all the general caches are L1 aligned.
1682                 * This should be particularly beneficial on SMP boxes, as it
1683                 * eliminates "false sharing".
1684                 * Note for systems short on memory removing the alignment will
1685                 * allow tighter packing of the smaller caches.
1686                 */
1687                if (!sizes->cs_cachep)
1688                        sizes->cs_cachep = create_kmalloc_cache(names->name,
1689                                        sizes->cs_size, ARCH_KMALLOC_FLAGS);
1690
1691#ifdef CONFIG_ZONE_DMA
1692                sizes->cs_dmacachep = create_kmalloc_cache(
1693                        names->name_dma, sizes->cs_size,
1694                        SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
1695#endif
1696                sizes++;
1697                names++;
1698        }
1699        /* 4) Replace the bootstrap head arrays */
1700        {
1701                struct array_cache *ptr;
1702
1703                ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1704
1705                memcpy(ptr, cpu_cache_get(kmem_cache),
1706                       sizeof(struct arraycache_init));
1707                /*
1708                 * Do not assume that spinlocks can be initialized via memcpy:
1709                 */
1710                spin_lock_init(&ptr->lock);
1711
1712                kmem_cache->array[smp_processor_id()] = ptr;
1713
1714                ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1715
1716                BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
1717                       != &initarray_generic.cache);
1718                memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
1719                       sizeof(struct arraycache_init));
1720                /*
1721                 * Do not assume that spinlocks can be initialized via memcpy:
1722                 */
1723                spin_lock_init(&ptr->lock);
1724
1725                malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
1726                    ptr;
1727        }
1728        /* 5) Replace the bootstrap kmem_list3's */
1729        {
1730                int nid;
1731
1732                for_each_online_node(nid) {
1733                        init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
1734
1735                        init_list(malloc_sizes[INDEX_AC].cs_cachep,
1736                                  &initkmem_list3[SIZE_AC + nid], nid);
1737
1738                        if (INDEX_AC != INDEX_L3) {
1739                                init_list(malloc_sizes[INDEX_L3].cs_cachep,
1740                                          &initkmem_list3[SIZE_L3 + nid], nid);
1741                        }
1742                }
1743        }
1744
1745        slab_state = UP;
1746}
1747
1748void __init kmem_cache_init_late(void)
1749{
1750        struct kmem_cache *cachep;
1751
1752        slab_state = UP;
1753
1754        /* 6) resize the head arrays to their final sizes */
1755        mutex_lock(&slab_mutex);
1756        list_for_each_entry(cachep, &slab_caches, list)
1757                if (enable_cpucache(cachep, GFP_NOWAIT))
1758                        BUG();
1759        mutex_unlock(&slab_mutex);
1760
1761        /* Annotate slab for lockdep -- annotate the malloc caches */
1762        init_lock_keys();
1763
1764        /* Done! */
1765        slab_state = FULL;
1766
1767        /*
1768         * Register a cpu startup notifier callback that initializes
1769         * cpu_cache_get for all new cpus
1770         */
1771        register_cpu_notifier(&cpucache_notifier);
1772
1773#ifdef CONFIG_NUMA
1774        /*
1775         * Register a memory hotplug callback that initializes and frees
1776         * nodelists.
1777         */
1778        hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1779#endif
1780
1781        /*
1782         * The reap timers are started later, with a module init call; that part
1783         * of the kernel is not yet operational.
1784         */
1785}
1786
1787static int __init cpucache_init(void)
1788{
1789        int cpu;
1790
1791        /*
1792         * Register the timers that return unneeded pages to the page allocator
1793         */
1794        for_each_online_cpu(cpu)
1795                start_cpu_timer(cpu);
1796
1797        /* Done! */
1798        slab_state = FULL;
1799        return 0;
1800}
1801__initcall(cpucache_init);
1802
1803static noinline void
1804slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1805{
1806        struct kmem_list3 *l3;
1807        struct slab *slabp;
1808        unsigned long flags;
1809        int node;
1810
1811        printk(KERN_WARNING
1812                "SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1813                nodeid, gfpflags);
1814        printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
1815                cachep->name, cachep->size, cachep->gfporder);
1816
1817        for_each_online_node(node) {
1818                unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
1819                unsigned long active_slabs = 0, num_slabs = 0;
1820
1821                l3 = cachep->nodelists[node];
1822                if (!l3)
1823                        continue;
1824
1825                spin_lock_irqsave(&l3->list_lock, flags);
1826                list_for_each_entry(slabp, &l3->slabs_full, list) {
1827                        active_objs += cachep->num;
1828                        active_slabs++;
1829                }
1830                list_for_each_entry(slabp, &l3->slabs_partial, list) {
1831                        active_objs += slabp->inuse;
1832                        active_slabs++;
1833                }
1834                list_for_each_entry(slabp, &l3->slabs_free, list)
1835                        num_slabs++;
1836
1837                free_objects += l3->free_objects;
1838                spin_unlock_irqrestore(&l3->list_lock, flags);
1839
1840                num_slabs += active_slabs;
1841                num_objs = num_slabs * cachep->num;
1842                printk(KERN_WARNING
1843                        "  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
1844                        node, active_slabs, num_slabs, active_objs, num_objs,
1845                        free_objects);
1846        }
1847}
1848
1849/*
1850 * Interface to system's page allocator. No need to hold the cache-lock.
1851 *
1852 * If we requested dmaable memory, we will get it. Even if we
1853 * did not request dmaable memory, we might get it, but that
1854 * would be relatively rare and ignorable.
1855 */
1856static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1857{
1858        struct page *page;
1859        int nr_pages;
1860        int i;
1861
1862#ifndef CONFIG_MMU
1863        /*
1864         * Nommu uses slabs for process anonymous memory allocations, and thus
1865         * requires __GFP_COMP to properly refcount higher order allocations
1866         */
1867        flags |= __GFP_COMP;
1868#endif
1869
1870        flags |= cachep->allocflags;
1871        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1872                flags |= __GFP_RECLAIMABLE;
1873
1874        page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1875        if (!page) {
1876                if (!(flags & __GFP_NOWARN) && printk_ratelimit())
1877                        slab_out_of_memory(cachep, flags, nodeid);
1878                return NULL;
1879        }
1880
1881        /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1882        if (unlikely(page->pfmemalloc))
1883                pfmemalloc_active = true;
1884
1885        nr_pages = (1 << cachep->gfporder);
1886        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1887                add_zone_page_state(page_zone(page),
1888                        NR_SLAB_RECLAIMABLE, nr_pages);
1889        else
1890                add_zone_page_state(page_zone(page),
1891                        NR_SLAB_UNRECLAIMABLE, nr_pages);
1892        for (i = 0; i < nr_pages; i++) {
1893                __SetPageSlab(page + i);
1894
1895                if (page->pfmemalloc)
1896                        SetPageSlabPfmemalloc(page + i);
1897        }
1898        memcg_bind_pages(cachep, cachep->gfporder);
1899
1900        if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1901                kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1902
1903                if (cachep->ctor)
1904                        kmemcheck_mark_uninitialized_pages(page, nr_pages);
1905                else
1906                        kmemcheck_mark_unallocated_pages(page, nr_pages);
1907        }
1908
1909        return page_address(page);
1910}
1911
1912/*
1913 * Interface to system's page release.
1914 */
1915static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1916{
1917        unsigned long i = (1 << cachep->gfporder);
1918        struct page *page = virt_to_page(addr);
1919        const unsigned long nr_freed = i;
1920
1921        kmemcheck_free_shadow(page, cachep->gfporder);
1922
1923        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1924                sub_zone_page_state(page_zone(page),
1925                                NR_SLAB_RECLAIMABLE, nr_freed);
1926        else
1927                sub_zone_page_state(page_zone(page),
1928                                NR_SLAB_UNRECLAIMABLE, nr_freed);
1929        while (i--) {
1930                BUG_ON(!PageSlab(page));
1931                __ClearPageSlabPfmemalloc(page);
1932                __ClearPageSlab(page);
1933                page++;
1934        }
1935
1936        memcg_release_pages(cachep, cachep->gfporder);
1937        if (current->reclaim_state)
1938                current->reclaim_state->reclaimed_slab += nr_freed;
1939        free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
1940}
1941
1942static void kmem_rcu_free(struct rcu_head *head)
1943{
1944        struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
1945        struct kmem_cache *cachep = slab_rcu->cachep;
1946
1947        kmem_freepages(cachep, slab_rcu->addr);
1948        if (OFF_SLAB(cachep))
1949                kmem_cache_free(cachep->slabp_cache, slab_rcu);
1950}
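/*
 * slab_destroy() reuses the slab management area as a struct slab_rcu
 * (an rcu_head plus the cache and slab address), so the RCU-deferred
 * free path above needs no extra allocation; once the grace period has
 * elapsed, the pages and, for off-slab caches, the descriptor are freed.
 */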
1951
1952#if DEBUG
1953
1954#ifdef CONFIG_DEBUG_PAGEALLOC
1955static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1956                            unsigned long caller)
1957{
1958        int size = cachep->object_size;
1959
1960        addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1961
1962        if (size < 5 * sizeof(unsigned long))
1963                return;
1964
1965        *addr++ = 0x12345678;
1966        *addr++ = caller;
1967        *addr++ = smp_processor_id();
1968        size -= 3 * sizeof(unsigned long);
1969        {
1970                unsigned long *sptr = &caller;
1971                unsigned long svalue;
1972
1973                while (!kstack_end(sptr)) {
1974                        svalue = *sptr++;
1975                        if (kernel_text_address(svalue)) {
1976                                *addr++ = svalue;
1977                                size -= sizeof(unsigned long);
1978                                if (size <= sizeof(unsigned long))
1979                                        break;
1980                        }
1981                }
1982
1983        }
1984        *addr++ = 0x87654321;
1985}
1986#endif
1987
1988static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1989{
1990        int size = cachep->object_size;
1991        addr = &((char *)addr)[obj_offset(cachep)];
1992
1993        memset(addr, val, size);
1994        *(unsigned char *)(addr + size - 1) = POISON_END;
1995}
1996
1997static void dump_line(char *data, int offset, int limit)
1998{
1999        int i;
2000        unsigned char error = 0;
2001        int bad_count = 0;
2002
2003        printk(KERN_ERR "%03x: ", offset);
2004        for (i = 0; i < limit; i++) {
2005                if (data[offset + i] != POISON_FREE) {
2006                        error = data[offset + i];
2007                        bad_count++;
2008                }
2009        }
2010        print_hex_dump(KERN_CONT, "", 0, 16, 1,
2011                        &data[offset], limit, 1);
2012
2013        if (bad_count == 1) {
2014                error ^= POISON_FREE;
2015                if (!(error & (error - 1))) {
2016                        printk(KERN_ERR "Single bit error detected. Probably "
2017                                        "bad RAM.\n");
2018#ifdef CONFIG_X86
2019                        printk(KERN_ERR "Run memtest86+ or a similar memory "
2020                                        "test tool.\n");
2021#else
2022                        printk(KERN_ERR "Run a memory test tool.\n");
2023#endif
2024                }
2025        }
2026}
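/*
 * Worked example for the check above: POISON_FREE is 0x6b.  If exactly
 * one poisoned byte reads back as, say, 0x6a, then error ^ POISON_FREE ==
 * 0x01, and 0x01 & (0x01 - 1) == 0, i.e. the difference is a single
 * flipped bit - the classic signature of failing RAM rather than a
 * software overwrite.
 */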
2027#endif
2028
2029#if DEBUG
2030
2031static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
2032{
2033        int i, size;
2034        char *realobj;
2035
2036        if (cachep->flags & SLAB_RED_ZONE) {
2037                printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
2038                        *dbg_redzone1(cachep, objp),
2039                        *dbg_redzone2(cachep, objp));
2040        }
2041
2042        if (cachep->flags & SLAB_STORE_USER) {
2043                printk(KERN_ERR "Last user: [<%p>]",
2044                        *dbg_userword(cachep, objp));
2045                print_symbol("(%s)",
2046                                (unsigned long)*dbg_userword(cachep, objp));
2047                printk("\n");
2048        }
2049        realobj = (char *)objp + obj_offset(cachep);
2050        size = cachep->object_size;
2051        for (i = 0; i < size && lines; i += 16, lines--) {
2052                int limit;
2053                limit = 16;
2054                if (i + limit > size)
2055                        limit = size - i;
2056                dump_line(realobj, i, limit);
2057        }
2058}
2059
2060static void check_poison_obj(struct kmem_cache *cachep, void *objp)
2061{
2062        char *realobj;
2063        int size, i;
2064        int lines = 0;
2065
2066        realobj = (char *)objp + obj_offset(cachep);
2067        size = cachep->object_size;
2068
2069        for (i = 0; i < size; i++) {
2070                char exp = POISON_FREE;
2071                if (i == size - 1)
2072                        exp = POISON_END;
2073                if (realobj[i] != exp) {
2074                        int limit;
2075                        /* Mismatch! */
2076                        /* Print header */
2077                        if (lines == 0) {
2078                                printk(KERN_ERR
2079                                        "Slab corruption (%s): %s start=%p, len=%d\n",
2080                                        print_tainted(), cachep->name, realobj, size);
2081                                print_objinfo(cachep, objp, 0);
2082                        }
2083                        /* Hexdump the affected line */
2084                        i = (i / 16) * 16;
2085                        limit = 16;
2086                        if (i + limit > size)
2087                                limit = size - i;
2088                        dump_line(realobj, i, limit);
2089                        i += 16;
2090                        lines++;
2091                        /* Limit to 5 lines */
2092                        if (lines > 5)
2093                                break;
2094                }
2095        }
2096        if (lines != 0) {
2097                /* Print some data about the neighboring objects, if they
2098                 * exist:
2099                 */
2100                struct slab *slabp = virt_to_slab(objp);
2101                unsigned int objnr;
2102
2103                objnr = obj_to_index(cachep, slabp, objp);
2104                if (objnr) {
2105                        objp = index_to_obj(cachep, slabp, objnr - 1);
2106                        realobj = (char *)objp + obj_offset(cachep);
2107                        printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
2108                               realobj, size);
2109                        print_objinfo(cachep, objp, 2);
2110                }
2111                if (objnr + 1 < cachep->num) {
2112                        objp = index_to_obj(cachep, slabp, objnr + 1);
2113                        realobj = (char *)objp + obj_offset(cachep);
2114                        printk(KERN_ERR "Next obj: start=%p, len=%d\n",
2115                               realobj, size);
2116                        print_objinfo(cachep, objp, 2);
2117                }
2118        }
2119}
2120#endif
2121
2122#if DEBUG
2123static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
2124{
2125        int i;
2126        for (i = 0; i < cachep->num; i++) {
2127                void *objp = index_to_obj(cachep, slabp, i);
2128
2129                if (cachep->flags & SLAB_POISON) {
2130#ifdef CONFIG_DEBUG_PAGEALLOC
2131                        if (cachep->size % PAGE_SIZE == 0 &&
2132                                        OFF_SLAB(cachep))
2133                                kernel_map_pages(virt_to_page(objp),
2134                                        cachep->size / PAGE_SIZE, 1);
2135                        else
2136                                check_poison_obj(cachep, objp);
2137#else
2138                        check_poison_obj(cachep, objp);
2139#endif
2140                }
2141                if (cachep->flags & SLAB_RED_ZONE) {
2142                        if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2143                                slab_error(cachep, "start of a freed object "
2144                                           "was overwritten");
2145                        if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2146                                slab_error(cachep, "end of a freed object "
2147                                           "was overwritten");
2148                }
2149        }
2150}
2151#else
2152static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
2153{
2154}
2155#endif
2156
2157/**
2158 * slab_destroy - destroy and release all objects in a slab
2159 * @cachep: cache pointer being destroyed
2160 * @slabp: slab pointer being destroyed
2161 *
2162 * Destroy all the objs in a slab, and release the mem back to the system.
2163 * Before calling the slab must have been unlinked from the cache.  The
2164 * cache-lock is not held/needed.
2165 */
2166static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
2167{
2168        void *addr = slabp->s_mem - slabp->colouroff;
2169
2170        slab_destroy_debugcheck(cachep, slabp);
2171        if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
2172                struct slab_rcu *slab_rcu;
2173
2174                slab_rcu = (struct slab_rcu *)slabp;
2175                slab_rcu->cachep = cachep;
2176                slab_rcu->addr = addr;
2177                call_rcu(&slab_rcu->head, kmem_rcu_free);
2178        } else {
2179                kmem_freepages(cachep, addr);
2180                if (OFF_SLAB(cachep))
2181                        kmem_cache_free(cachep->slabp_cache, slabp);
2182        }
2183}
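/*
 * Usage sketch for SLAB_DESTROY_BY_RCU (illustrative; lookup() and
 * revalidate() stand for caller-side helpers): the pages are not returned
 * to the page allocator until a grace period has elapsed, so a lockless
 * reader may touch a stale object but must re-check its identity:
 *
 *	rcu_read_lock();
 *	obj = lookup(key);
 *	if (obj && !revalidate(obj, key))
 *		obj = NULL;	// object was freed and reused meanwhile
 *	rcu_read_unlock();
 */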
2184
2185/**
2186 * calculate_slab_order - calculate size (page order) of slabs
2187 * @cachep: pointer to the cache that is being created
2188 * @size: size of objects to be created in this cache.
2189 * @align: required alignment for the objects.
2190 * @flags: slab allocation flags
2191 *
2192 * Also calculates the number of objects per slab.
2193 *
2194 * This could be made much more intelligent.  For now, try to avoid using
2195 * high order pages for slabs.  When the gfp() functions are more friendly
2196 * towards high-order requests, this should be changed.
2197 */
2198static size_t calculate_slab_order(struct kmem_cache *cachep,
2199                        size_t size, size_t align, unsigned long flags)
2200{
2201        unsigned long offslab_limit;
2202        size_t left_over = 0;
2203        int gfporder;
2204
2205        for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
2206                unsigned int num;
2207                size_t remainder;
2208
2209                cache_estimate(gfporder, size, align, flags, &remainder, &num);
2210                if (!num)
2211                        continue;
2212
2213                if (flags & CFLGS_OFF_SLAB) {
2214                        /*
2215                         * Max number of objs-per-slab for caches which
2216                         * use off-slab slabs. Needed to avoid a possible
2217                         * looping condition in cache_grow().
2218                         */
2219                        offslab_limit = size - sizeof(struct slab);
2220                        offslab_limit /= sizeof(kmem_bufctl_t);
2221
2222                        if (num > offslab_limit)
2223                                break;
2224                }
2225
2226                /* Found something acceptable - save it away */
2227                cachep->num = num;
2228                cachep->gfporder = gfporder;
2229                left_over = remainder;
2230
2231                /*
2232                 * A VFS-reclaimable slab tends to have most allocations
2233                 * as GFP_NOFS and we really don't want to have to be allocating
2234                 * higher-order pages when we are unable to shrink dcache.
2235                 */
2236                if (flags & SLAB_RECLAIM_ACCOUNT)
2237                        break;
2238
2239                /*
2240                 * Large number of objects is good, but very large slabs are
2241                 * currently bad for the gfp()s.
2242                 */
2243                if (gfporder >= slab_max_order)
2244                        break;
2245
2246                /*
2247                 * Acceptable internal fragmentation?
2248                 */
2249                if (left_over * 8 <= (PAGE_SIZE << gfporder))
2250                        break;
2251        }
2252        return left_over;
2253}
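/*
 * Worked example (management overhead ignored for simplicity, and
 * assuming slab_max_order permits order 1): size = 1500, PAGE_SIZE = 4096.
 *
 *	gfporder 0: num = 2, left_over = 1096; 1096 * 8 > 4096, try higher
 *	gfporder 1: num = 5, left_over =  692;  692 * 8 <= 8192, accept
 *
 * so the cache would use order-1 slabs holding 5 objects each.
 */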
2254
2255static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2256{
2257        if (slab_state >= FULL)
2258                return enable_cpucache(cachep, gfp);
2259
2260        if (slab_state == DOWN) {
2261                /*
2262                 * Note: Creation of first cache (kmem_cache).
2263                 * The setup of the list3s is taken care of
2264                 * by the caller of __kmem_cache_create.
2265                 */
2266                cachep->array[smp_processor_id()] = &initarray_generic.cache;
2267                slab_state = PARTIAL;
2268        } else if (slab_state == PARTIAL) {
2269                /*
2270                 * Note: the second kmem_cache_create must create the cache
2271                 * that's used by kmalloc(24), otherwise the creation of
2272                 * further caches will BUG().
2273                 */
2274                cachep->array[smp_processor_id()] = &initarray_generic.cache;
2275
2276                /*
2277                 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
2278                 * the second cache, then we need to set up all its list3s,
2279                 * otherwise the creation of further caches will BUG().
2280                 */
2281                set_up_list3s(cachep, SIZE_AC);
2282                if (INDEX_AC == INDEX_L3)
2283                        slab_state = PARTIAL_L3;
2284                else
2285                        slab_state = PARTIAL_ARRAYCACHE;
2286        } else {
2287                /* Remaining boot caches */
2288                cachep->array[smp_processor_id()] =
2289                        kmalloc(sizeof(struct arraycache_init), gfp);
2290
2291                if (slab_state == PARTIAL_ARRAYCACHE) {
2292                        set_up_list3s(cachep, SIZE_L3);
2293                        slab_state = PARTIAL_L3;
2294                } else {
2295                        int node;
2296                        for_each_online_node(node) {
2297                                cachep->nodelists[node] =
2298                                    kmalloc_node(sizeof(struct kmem_list3),
2299                                                gfp, node);
2300                                BUG_ON(!cachep->nodelists[node]);
2301                                kmem_list3_init(cachep->nodelists[node]);
2302                        }
2303                }
2304        }
2305        cachep->nodelists[numa_mem_id()]->next_reap =
2306                        jiffies + REAPTIMEOUT_LIST3 +
2307                        ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2308
2309        cpu_cache_get(cachep)->avail = 0;
2310        cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2311        cpu_cache_get(cachep)->batchcount = 1;
2312        cpu_cache_get(cachep)->touched = 0;
2313        cachep->batchcount = 1;
2314        cachep->limit = BOOT_CPUCACHE_ENTRIES;
2315        return 0;
2316}
2317
2318/**
2319 * __kmem_cache_create - Create a cache.
2320 * @cachep: cache management descriptor
2321 * @flags: SLAB flags
2322 *
2323 * Returns 0 on success, a negative errno on failure.
2324 * Cannot be called within an interrupt, but can be interrupted.
2325 * The @ctor is run when new pages are allocated by the cache.
2326 *
2327 * The flags are
2328 *
2329 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2330 * to catch references to uninitialised memory.
2331 *
2332 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2333 * for buffer overruns.
2334 *
2335 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2336 * cacheline.  This can be beneficial if you're counting cycles as closely
2337 * as davem.
2338 */
2339int
2340__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2341{
2342        size_t left_over, slab_size, ralign;
2343        gfp_t gfp;
2344        int err;
2345        size_t size = cachep->size;
2346
2347#if DEBUG
2348#if FORCED_DEBUG
2349        /*
2350         * Enable redzoning and last user accounting, except for caches with
2351         * large objects, if the increased size would increase the object size
2352         * above the next power of two: caches with object sizes just above a
2353         * power of two have a significant amount of internal fragmentation.
2354         */
2355        if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2356                                                2 * sizeof(unsigned long long)))
2357                flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2358        if (!(flags & SLAB_DESTROY_BY_RCU))
2359                flags |= SLAB_POISON;
2360#endif
2361        if (flags & SLAB_DESTROY_BY_RCU)
2362                BUG_ON(flags & SLAB_POISON);
2363#endif
2364
2365        /*
2366         * Check that size is in terms of words.  This is needed to avoid
2367         * unaligned accesses for some archs when redzoning is used, and makes
2368         * sure any on-slab bufctl's are also correctly aligned.
2369         */
2370        if (size & (BYTES_PER_WORD - 1)) {
2371                size += (BYTES_PER_WORD - 1);
2372                size &= ~(BYTES_PER_WORD - 1);
2373        }
2374
2375        /*
2376         * Redzoning and user store require word alignment or possibly larger.
2377         * Note this will be overridden by architecture or caller mandated
2378         * alignment if either is greater than BYTES_PER_WORD.
2379         */
2380        if (flags & SLAB_STORE_USER)
2381                ralign = BYTES_PER_WORD;
2382
2383        if (flags & SLAB_RED_ZONE) {
2384                ralign = REDZONE_ALIGN;
2385                /* If redzoning, ensure that the second redzone is suitably
2386                 * aligned, by adjusting the object size accordingly. */
2387                size += REDZONE_ALIGN - 1;
2388                size &= ~(REDZONE_ALIGN - 1);
2389        }
2390
2391        /* 3) caller mandated alignment */
2392        if (ralign < cachep->align) {
2393                ralign = cachep->align;
2394        }
2395        /* disable debug if necessary */
2396        if (ralign > __alignof__(unsigned long long))
2397                flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2398        /*
2399         * 4) Store it.
2400         */
2401        cachep->align = ralign;
2402
2403        if (slab_is_available())
2404                gfp = GFP_KERNEL;
2405        else
2406                gfp = GFP_NOWAIT;
2407
2408        setup_nodelists_pointer(cachep);
2409#if DEBUG
2410
2411        /*
2412         * Both debugging options require word-alignment which is calculated
2413         * into align above.
2414         */
2415        if (flags & SLAB_RED_ZONE) {
2416                /* add space for red zone words */
2417                cachep->obj_offset += sizeof(unsigned long long);
2418                size += 2 * sizeof(unsigned long long);
2419        }
2420        if (flags & SLAB_STORE_USER) {
2421                /* user store requires one word storage behind the end of
2422                 * the real object. But if the second red zone needs to be
2423                 * aligned to 64 bits, we must allow that much space.
2424                 */
2425                if (flags & SLAB_RED_ZONE)
2426                        size += REDZONE_ALIGN;
2427                else
2428                        size += BYTES_PER_WORD;
2429        }
2430#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2431        if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
2432            && cachep->object_size > cache_line_size()
2433            && ALIGN(size, cachep->align) < PAGE_SIZE) {
2434                cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
2435                size = PAGE_SIZE;
2436        }
2437#endif
2438#endif
2439
2440        /*
2441         * Determine if the slab management is 'on' or 'off' slab.
2442         * (bootstrapping cannot cope with offslab caches so don't do
2443         * it too early on. Always use on-slab management when
2444         * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
2445         */
2446        if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
2447            !(flags & SLAB_NOLEAKTRACE))
2448                /*
2449                 * Size is large, assume best to place the slab management obj
2450                 * off-slab (should allow better packing of objs).
2451                 */
2452                flags |= CFLGS_OFF_SLAB;
2453
2454        size = ALIGN(size, cachep->align);
2455
2456        left_over = calculate_slab_order(cachep, size, cachep->align, flags);
2457
2458        if (!cachep->num)
2459                return -E2BIG;
2460
2461        slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2462                          + sizeof(struct slab), cachep->align);
2463
2464        /*
2465         * If the slab has been placed off-slab, and we have enough space then
2466         * move it on-slab. This is at the expense of any extra colouring.
2467         */
2468        if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2469                flags &= ~CFLGS_OFF_SLAB;
2470                left_over -= slab_size;
2471        }
2472
2473        if (flags & CFLGS_OFF_SLAB) {
2474                /* really off slab. No need for manual alignment */
2475                slab_size =
2476                    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2477
2478#ifdef CONFIG_PAGE_POISONING
2479                /* If we're going to use the generic kernel_map_pages()
2480                 * poisoning, then it's going to smash the contents of
2481                 * the redzone and userword anyhow, so switch them off.
2482                 */
2483                if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2484                        flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2485#endif
2486        }
2487
2488        cachep->colour_off = cache_line_size();
2489        /* Offset must be a multiple of the alignment. */
2490        if (cachep->colour_off < cachep->align)
2491                cachep->colour_off = cachep->align;
2492        cachep->colour = left_over / cachep->colour_off;
2493        cachep->slab_size = slab_size;
2494        cachep->flags = flags;
2495        cachep->allocflags = 0;
2496        if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2497                cachep->allocflags |= GFP_DMA;
2498        cachep->size = size;
2499        cachep->reciprocal_buffer_size = reciprocal_value(size);
2500
2501        if (flags & CFLGS_OFF_SLAB) {
2502                cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
2503                /*
2504                 * This is a possibility for one of the malloc_sizes caches.
2505                 * But since we go off slab only for object size greater than
2506                 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
2507                 * this should not happen at all.
2508                 * But leave a BUG_ON for some lucky dude.
2509                 */
2510                BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
2511        }
2512
2513        err = setup_cpu_cache(cachep, gfp);
2514        if (err) {
2515                __kmem_cache_shutdown(cachep);
2516                return err;
2517        }
2518
2519        if (flags & SLAB_DEBUG_OBJECTS) {
2520                /*
2521                 * Would deadlock through slab_destroy()->call_rcu()->
2522                 * debug_object_activate()->kmem_cache_alloc().
2523                 */
2524                WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
2525
2526                slab_set_debugobj_lock_classes(cachep);
2527        } else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
2528                on_slab_lock_classes(cachep);
2529
2530        return 0;
2531}
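/*
 * Typical usage goes through the public wrapper in mm/slab_common.c,
 * which fills in the descriptor and calls __kmem_cache_create() above
 * (sketch; the "foo" names are illustrative):
 *
 *	struct kmem_cache *foo_cache =
 *		kmem_cache_create("foo", sizeof(struct foo), 0,
 *				  SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */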
2532
2533#if DEBUG
2534static void check_irq_off(void)
2535{
2536        BUG_ON(!irqs_disabled());
2537}
2538
2539static void check_irq_on(void)
2540{
2541        BUG_ON(irqs_disabled());
2542}
2543
2544static void check_spinlock_acquired(struct kmem_cache *cachep)
2545{
2546#ifdef CONFIG_SMP
2547        check_irq_off();
2548        assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
2549#endif
2550}
2551
2552static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2553{
2554#ifdef CONFIG_SMP
2555        check_irq_off();
2556        assert_spin_locked(&cachep->nodelists[node]->list_lock);
2557#endif
2558}
2559
2560#else
2561#define check_irq_off() do { } while(0)
2562#define check_irq_on()  do { } while(0)
2563#define check_spinlock_acquired(x) do { } while(0)
2564#define check_spinlock_acquired_node(x, y) do { } while(0)
2565#endif
2566
2567static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2568                        struct array_cache *ac,
2569                        int force, int node);
2570
2571static void do_drain(void *arg)
2572{
2573        struct kmem_cache *cachep = arg;
2574        struct array_cache *ac;
2575        int node = numa_mem_id();
2576
2577        check_irq_off();
2578        ac = cpu_cache_get(cachep);
2579        spin_lock(&cachep->nodelists[node]->list_lock);
2580        free_block(cachep, ac->entry, ac->avail, node);
2581        spin_unlock(&cachep->nodelists[node]->list_lock);
2582        ac->avail = 0;
2583}
2584
2585static void drain_cpu_caches(struct kmem_cache *cachep)
2586{
2587        struct kmem_list3 *l3;
2588        int node;
2589
2590        on_each_cpu(do_drain, cachep, 1);
2591        check_irq_on();
2592        for_each_online_node(node) {
2593                l3 = cachep->nodelists[node];
2594                if (l3 && l3->alien)
2595                        drain_alien_cache(cachep, l3->alien);
2596        }
2597
2598        for_each_online_node(node) {
2599                l3 = cachep->nodelists[node];
2600                if (l3)
2601                        drain_array(cachep, l3, l3->shared, 1, node);
2602        }
2603}
2604
2605/*
2606 * Remove slabs from the list of free slabs.
2607 * Specify the number of slabs to drain in tofree.
2608 *
2609 * Returns the actual number of slabs released.
2610 */
2611static int drain_freelist(struct kmem_cache *cache,
2612                        struct kmem_list3 *l3, int tofree)
2613{
2614        struct list_head *p;
2615        int nr_freed;
2616        struct slab *slabp;
2617
2618        nr_freed = 0;
2619        while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
2620
2621                spin_lock_irq(&l3->list_lock);
2622                p = l3->slabs_free.prev;
2623                if (p == &l3->slabs_free) {
2624                        spin_unlock_irq(&l3->list_lock);
2625                        goto out;
2626                }
2627
2628                slabp = list_entry(p, struct slab, list);
2629#if DEBUG
2630                BUG_ON(slabp->inuse);
2631#endif
2632                list_del(&slabp->list);
2633                /*
2634                 * Safe to drop the lock. The slab is no longer linked
2635                 * to the cache.
2636                 */
2637                l3->free_objects -= cache->num;
2638                spin_unlock_irq(&l3->list_lock);
2639                slab_destroy(cache, slabp);
2640                nr_freed++;
2641        }
2642out:
2643        return nr_freed;
2644}
2645
2646/* Called with slab_mutex held to protect against cpu hotplug */
2647static int __cache_shrink(struct kmem_cache *cachep)
2648{
2649        int ret = 0, i = 0;
2650        struct kmem_list3 *l3;
2651
2652        drain_cpu_caches(cachep);
2653
2654        check_irq_on();
2655        for_each_online_node(i) {
2656                l3 = cachep->nodelists[i];
2657                if (!l3)
2658                        continue;
2659
2660                drain_freelist(cachep, l3, l3->free_objects);
2661
2662                ret += !list_empty(&l3->slabs_full) ||
2663                        !list_empty(&l3->slabs_partial);
2664        }
2665        return (ret ? 1 : 0);
2666}
2667
2668/**
2669 * kmem_cache_shrink - Shrink a cache.
2670 * @cachep: The cache to shrink.
2671 *
2672 * Releases as many slabs as possible for a cache.
2673 * To help debugging, a zero exit status indicates all slabs were released.
2674 */
2675int kmem_cache_shrink(struct kmem_cache *cachep)
2676{
2677        int ret;
2678        BUG_ON(!cachep || in_interrupt());
2679
2680        get_online_cpus();
2681        mutex_lock(&slab_mutex);
2682        ret = __cache_shrink(cachep);
2683        mutex_unlock(&slab_mutex);
2684        put_online_cpus();
2685        return ret;
2686}
2687EXPORT_SYMBOL(kmem_cache_shrink);
2688
2689int __kmem_cache_shutdown(struct kmem_cache *cachep)
2690{
2691        int i;
2692        struct kmem_list3 *l3;
2693        int rc = __cache_shrink(cachep);
2694
2695        if (rc)
2696                return rc;
2697
2698        for_each_online_cpu(i)
2699            kfree(cachep->array[i]);
2700
2701        /* NUMA: free the list3 structures */
2702        for_each_online_node(i) {
2703                l3 = cachep->nodelists[i];
2704                if (l3) {
2705                        kfree(l3->shared);
2706                        free_alien_cache(l3->alien);
2707                        kfree(l3);
2708                }
2709        }
2710        return 0;
2711}
2712
2713/*
2714 * Get the memory for a slab management obj.
2715 * When a cache's slab descriptors are stored off-slab, they always come
2716 * from one of the malloc_sizes caches.  The slab descriptor cannot
2717 * come from the same cache which is getting created because,
2718 * when we are searching for an appropriate cache for these
2719 * descriptors in kmem_cache_create, we search through the malloc_sizes array.
2720 * If we are creating a malloc_sizes cache here it would not be visible to
2721 * kmem_find_general_cachep till the initialization is complete.
2722 * Hence slabp_cache cannot be the same as the cache being created.
2723 */
2724static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2725                                   int colour_off, gfp_t local_flags,
2726                                   int nodeid)
2727{
2728        struct slab *slabp;
2729
2730        if (OFF_SLAB(cachep)) {
2731                /* Slab management obj is off-slab. */
2732                slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2733                                              local_flags, nodeid);
2734                if (!slabp)
2735                        return NULL;
2736                /*
2737                 * If the first object in the slab is leaked (it's allocated
2738                 * but no one has a reference to it), we want to make sure
2739                 * kmemleak does not treat the ->s_mem pointer as a reference
2740                 * to the object. Otherwise we will not report the leak.
2741                 */
2742                kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
2743                                   local_flags);
2744        } else {
2745                slabp = objp + colour_off;
2746                colour_off += cachep->slab_size;
2747        }
2748        slabp->inuse = 0;
2749        slabp->colouroff = colour_off;
2750        slabp->s_mem = objp + colour_off;
2751        slabp->nodeid = nodeid;
2752        slabp->free = 0;
2753        return slabp;
2754}
2755
2756static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2757{
2758        return (kmem_bufctl_t *) (slabp + 1);
2759}
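/*
 * On-slab layout sketch for a 4-object slab (colour padding omitted):
 *
 *	[struct slab][kmem_bufctl_t bufctl[4]][obj 0][obj 1][obj 2][obj 3]
 *
 * The bufctl array holds an index-linked free list: after
 * cache_init_objs() below it is { 1, 2, 3, BUFCTL_END } with
 * slabp->free == 0, i.e. obj 0 -> obj 1 -> obj 2 -> obj 3 -> end.
 */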
2760
2761static void cache_init_objs(struct kmem_cache *cachep,
2762                            struct slab *slabp)
2763{
2764        int i;
2765
2766        for (i = 0; i < cachep->num; i++) {
2767                void *objp = index_to_obj(cachep, slabp, i);
2768#if DEBUG
2769                /* need to poison the objs? */
2770                if (cachep->flags & SLAB_POISON)
2771                        poison_obj(cachep, objp, POISON_FREE);
2772                if (cachep->flags & SLAB_STORE_USER)
2773                        *dbg_userword(cachep, objp) = NULL;
2774
2775                if (cachep->flags & SLAB_RED_ZONE) {
2776                        *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2777                        *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2778                }
2779                /*
2780                 * Constructors are not allowed to allocate memory from the same
2781                 * cache which they are a constructor for.  Otherwise, deadlock.
2782                 * They must also be threaded.
2783                 */
2784                if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2785                        cachep->ctor(objp + obj_offset(cachep));
2786
2787                if (cachep->flags & SLAB_RED_ZONE) {
2788                        if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2789                                slab_error(cachep, "constructor overwrote the"
2790                                           " end of an object");
2791                        if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2792                                slab_error(cachep, "constructor overwrote the"
2793                                           " start of an object");
2794                }
2795                if ((cachep->size % PAGE_SIZE) == 0 &&
2796                            OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2797                        kernel_map_pages(virt_to_page(objp),
2798                                         cachep->size / PAGE_SIZE, 0);
2799#else
2800                if (cachep->ctor)
2801                        cachep->ctor(objp);
2802#endif
2803                slab_bufctl(slabp)[i] = i + 1;
2804        }
2805        slab_bufctl(slabp)[i - 1] = BUFCTL_END;
2806}
2807
2808static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2809{
2810        if (CONFIG_ZONE_DMA_FLAG) {
2811                if (flags & GFP_DMA)
2812                        BUG_ON(!(cachep->allocflags & GFP_DMA));
2813                else
2814                        BUG_ON(cachep->allocflags & GFP_DMA);
2815        }
2816}
2817
2818static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2819                                int nodeid)
2820{
2821        void *objp = index_to_obj(cachep, slabp, slabp->free);
2822        kmem_bufctl_t next;
2823
2824        slabp->inuse++;
2825        next = slab_bufctl(slabp)[slabp->free];
2826#if DEBUG
2827        slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2828        WARN_ON(slabp->nodeid != nodeid);
2829#endif
2830        slabp->free = next;
2831
2832        return objp;
2833}
2834
2835static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2836                                void *objp, int nodeid)
2837{
2838        unsigned int objnr = obj_to_index(cachep, slabp, objp);
2839
2840#if DEBUG
2841        /* Verify that the slab belongs to the intended node */
2842        WARN_ON(slabp->nodeid != nodeid);
2843
2844        if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
2845                printk(KERN_ERR "slab: double free detected in cache "
2846                                "'%s', objp %p\n", cachep->name, objp);
2847                BUG();
2848        }
2849#endif
2850        slab_bufctl(slabp)[objnr] = slabp->free;
2851        slabp->free = objnr;
2852        slabp->inuse--;
2853}
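/*
 * Freelist example: with bufctl { 1, 2, 3, BUFCTL_END } and
 * slabp->free == 0, slab_get_obj() hands out obj 0 and advances
 * slabp->free to 1.  Freeing obj 0 through slab_put_obj() stores the old
 * head (1) in bufctl[0] and makes 0 the head again - a LIFO push/pop on
 * an array of indices, with no free-list pointers inside the objects.
 */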
2854
2855/*
2856 * Map pages beginning at addr to the given cache and slab. This is required
2857 * for the slab allocator to be able to look up the cache and slab of a
2858 * virtual address for kfree, ksize, and slab debugging.
2859 */
2860static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2861                           void *addr)
2862{
2863        int nr_pages;
2864        struct page *page;
2865
2866        page = virt_to_page(addr);
2867
2868        nr_pages = 1;
2869        if (likely(!PageCompound(page)))
2870                nr_pages <<= cache->gfporder;
2871
2872        do {
2873                page->slab_cache = cache;
2874                page->slab_page = slab;
2875                page++;
2876        } while (--nr_pages);
2877}
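/*
 * This mapping is what lets kfree() and ksize() work without a cache
 * argument (sketch of the lookup side):
 *
 *	struct page *page = virt_to_head_page(objp);
 *	struct kmem_cache *cachep = page->slab_cache;
 *	struct slab *slabp = page->slab_page;
 */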
2878
2879/*
2880 * Grow (by 1) the number of slabs within a cache.  This is called by
2881 * kmem_cache_alloc() when there are no active objs left in a cache.
2882 */
2883static int cache_grow(struct kmem_cache *cachep,
2884                gfp_t flags, int nodeid, void *objp)
2885{
2886        struct slab *slabp;
2887        size_t offset;
2888        gfp_t local_flags;
2889        struct kmem_list3 *l3;
2890
2891        /*
2892         * Be lazy and only check for valid flags here, keeping it out of the
2893         * critical path in kmem_cache_alloc().
2894         */
2895        BUG_ON(flags & GFP_SLAB_BUG_MASK);
2896        local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2897
2898        /* Take the l3 list lock to change the colour_next on this node */
2899        check_irq_off();
2900        l3 = cachep->nodelists[nodeid];
2901        spin_lock(&l3->list_lock);
2902
2903        /* Get colour for the slab, and calculate the next value. */
2904        offset = l3->colour_next;
2905        l3->colour_next++;
2906        if (l3->colour_next >= cachep->colour)
2907                l3->colour_next = 0;
2908        spin_unlock(&l3->list_lock);
2909
2910        offset *= cachep->colour_off;
2911
2912        if (local_flags & __GFP_WAIT)
2913                local_irq_enable();
2914
2915        /*
2916         * The test for missing atomic flag is performed here, rather than
2917         * the more obvious place, simply to reduce the critical path length
2918         * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2919         * will eventually be caught here (where it matters).
2920         */
2921        kmem_flagcheck(cachep, flags);
2922
2923        /*
2924         * Get mem for the objs.  Attempt to allocate a physical page from
2925         * 'nodeid'.
2926         */
2927        if (!objp)
2928                objp = kmem_getpages(cachep, local_flags, nodeid);
2929        if (!objp)
2930                goto failed;
2931
2932        /* Get slab management. */
2933        slabp = alloc_slabmgmt(cachep, objp, offset,
2934                        local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2935        if (!slabp)
2936                goto opps1;
2937
2938        slab_map_pages(cachep, slabp, objp);
2939
2940        cache_init_objs(cachep, slabp);
2941
2942        if (local_flags & __GFP_WAIT)
2943                local_irq_disable();
2944        check_irq_off();
2945        spin_lock(&l3->list_lock);
2946
2947        /* Make slab active. */
2948        list_add_tail(&slabp->list, &(l3->slabs_free));
2949        STATS_INC_GROWN(cachep);
2950        l3->free_objects += cachep->num;
2951        spin_unlock(&l3->list_lock);
2952        return 1;
2953opps1:
2954        kmem_freepages(cachep, objp);
2955failed:
2956        if (local_flags & __GFP_WAIT)
2957                local_irq_disable();
2958        return 0;
2959}
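/*
 * Colouring example: with colour_off == 64 (one cache line) and, say,
 * cachep->colour == 3, successive slabs place their first object at
 * offsets 0, 64 and 128 before colour_next wraps.  Objects in different
 * slabs therefore map to different cache lines/sets instead of all
 * competing for the same ones.
 */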
2960
2961#if DEBUG
2962
2963/*
2964 * Perform extra freeing checks:
2965 * - detect bad pointers.
2966 * - POISON/RED_ZONE checking
2967 */
2968static void kfree_debugcheck(const void *objp)
2969{
2970        if (!virt_addr_valid(objp)) {
2971                printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2972                       (unsigned long)objp);
2973                BUG();
2974        }
2975}
2976
2977static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2978{
2979        unsigned long long redzone1, redzone2;
2980
2981        redzone1 = *dbg_redzone1(cache, obj);
2982        redzone2 = *dbg_redzone2(cache, obj);
2983
2984        /*
2985         * Redzone is ok.
2986         */
2987        if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2988                return;
2989
2990        if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2991                slab_error(cache, "double free detected");
2992        else
2993                slab_error(cache, "memory outside object was overwritten");
2994
2995        printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2996                        obj, redzone1, redzone2);
2997}
2998
2999static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
3000                                   unsigned long caller)
3001{
3002        struct page *page;
3003        unsigned int objnr;
3004        struct slab *slabp;
3005
3006        BUG_ON(virt_to_cache(objp) != cachep);
3007
3008        objp -= obj_offset(cachep);
3009        kfree_debugcheck(objp);
3010        page = virt_to_head_page(objp);
3011
3012        slabp = page->slab_page;
3013
3014        if (cachep->flags & SLAB_RED_ZONE) {
3015                verify_redzone_free(cachep, objp);
3016                *dbg_redzone1(cachep, objp) = RED_INACTIVE;
3017                *dbg_redzone2(cachep, objp) = RED_INACTIVE;
3018        }
3019        if (cachep->flags & SLAB_STORE_USER)
3020                *dbg_userword(cachep, objp) = (void *)caller;
3021
3022        objnr = obj_to_index(cachep, slabp, objp);
3023
3024        BUG_ON(objnr >= cachep->num);
3025        BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
3026
3027#ifdef CONFIG_DEBUG_SLAB_LEAK
3028        slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
3029#endif
3030        if (cachep->flags & SLAB_POISON) {
3031#ifdef CONFIG_DEBUG_PAGEALLOC
3032                if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
3033                        store_stackinfo(cachep, objp, caller);
3034                        kernel_map_pages(virt_to_page(objp),
3035                                         cachep->size / PAGE_SIZE, 0);
3036                } else {
3037                        poison_obj(cachep, objp, POISON_FREE);
3038                }
3039#else
3040                poison_obj(cachep, objp, POISON_FREE);
3041#endif
3042        }
3043        return objp;
3044}
3045
3046static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
3047{
3048        kmem_bufctl_t i;
3049        int entries = 0;
3050
3051        /* Check slab's freelist to see if this obj is there. */
3052        for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
3053                entries++;
3054                if (entries > cachep->num || i >= cachep->num)
3055                        goto bad;
3056        }
3057        if (entries != cachep->num - slabp->inuse) {
3058bad:
3059                printk(KERN_ERR "slab: Internal list corruption detected in "
3060                        "cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
3061                        cachep->name, cachep->num, slabp, slabp->inuse,
3062                        print_tainted());
3063                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
3064                        sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
3065                        1);
3066                BUG();
3067        }
3068}
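
/*
 * Illustrative sketch of the freelist walked above (hypothetical
 * values): the free list is threaded through the bufctl array. With
 * num = 4 and objects 2 and 0 free:
 *
 *	slabp->free = 2
 *	slab_bufctl(slabp)[2] = 0
 *	slab_bufctl(slabp)[0] = BUFCTL_END
 *
 * so the loop visits object 2, then object 0, and stops at BUFCTL_END.
 */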
3069#else
3070#define kfree_debugcheck(x) do { } while(0)
3071#define cache_free_debugcheck(x,objp,z) (objp)
3072#define check_slabp(x,y) do { } while(0)
3073#endif
3074
3075static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
3076                                                        bool force_refill)
3077{
3078        int batchcount;
3079        struct kmem_list3 *l3;
3080        struct array_cache *ac;
3081        int node;
3082
3083        check_irq_off();
3084        node = numa_mem_id();
3085        if (unlikely(force_refill))
3086                goto force_grow;
3087retry:
3088        ac = cpu_cache_get(cachep);
3089        batchcount = ac->batchcount;
3090        if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
3091                /*
3092                 * If there was little recent activity on this cache, then
3093                 * perform only a partial refill.  Otherwise we could generate
3094                 * refill bouncing.
3095                 */
3096                batchcount = BATCHREFILL_LIMIT;
3097        }
3098        l3 = cachep->nodelists[node];
3099
3100        BUG_ON(ac->avail > 0 || !l3);
3101        spin_lock(&l3->list_lock);
3102
3103        /* See if we can refill from the shared array */
3104        if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
3105                l3->shared->touched = 1;
3106                goto alloc_done;
3107        }
3108
3109        while (batchcount > 0) {
3110                struct list_head *entry;
3111                struct slab *slabp;
3112                /* Get the slab the allocation will come from. */
3113                entry = l3->slabs_partial.next;
3114                if (entry == &l3->slabs_partial) {
3115                        l3->free_touched = 1;
3116                        entry = l3->slabs_free.next;
3117                        if (entry == &l3->slabs_free)
3118                                goto must_grow;
3119                }
3120
3121                slabp = list_entry(entry, struct slab, list);
3122                check_slabp(cachep, slabp);
3123                check_spinlock_acquired(cachep);
3124
3125                /*
3126                 * The slab was either on partial or free list so
3127                 * there must be at least one object available for
3128                 * allocation.
3129                 */
3130                BUG_ON(slabp->inuse >= cachep->num);
3131
3132                while (slabp->inuse < cachep->num && batchcount--) {
3133                        STATS_INC_ALLOCED(cachep);
3134                        STATS_INC_ACTIVE(cachep);
3135                        STATS_SET_HIGH(cachep);
3136
3137                        ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp,
3138                                                                        node));
3139                }
3140                check_slabp(cachep, slabp);
3141
3142                /* move slabp to the correct slab list: */
3143                list_del(&slabp->list);
3144                if (slabp->free == BUFCTL_END)
3145                        list_add(&slabp->list, &l3->slabs_full);
3146                else
3147                        list_add(&slabp->list, &l3->slabs_partial);
3148        }
3149
3150must_grow:
3151        l3->free_objects -= ac->avail;
3152alloc_done:
3153        spin_unlock(&l3->list_lock);
3154
3155        if (unlikely(!ac->avail)) {
3156                int x;
3157force_grow:
3158                x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
3159
3160                /* cache_grow can reenable interrupts, then ac could change. */
3161                ac = cpu_cache_get(cachep);
3162                node = numa_mem_id();
3163
3164                /* no objects in sight? abort */
3165                if (!x && (ac->avail == 0 || force_refill))
3166                        return NULL;
3167
3168                if (!ac->avail)         /* objects refilled by interrupt? */
3169                        goto retry;
3170        }
3171        ac->touched = 1;
3172
3173        return ac_get_obj(cachep, ac, flags, force_refill);
3174}
3175
3176static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3177                                                gfp_t flags)
3178{
3179        might_sleep_if(flags & __GFP_WAIT);
3180#if DEBUG
3181        kmem_flagcheck(cachep, flags);
3182#endif
3183}
3184
3185#if DEBUG
3186static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3187                                gfp_t flags, void *objp, unsigned long caller)
3188{
3189        if (!objp)
3190                return objp;
3191        if (cachep->flags & SLAB_POISON) {
3192#ifdef CONFIG_DEBUG_PAGEALLOC
3193                if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
3194                        kernel_map_pages(virt_to_page(objp),
3195                                         cachep->size / PAGE_SIZE, 1);
3196                else
3197                        check_poison_obj(cachep, objp);
3198#else
3199                check_poison_obj(cachep, objp);
3200#endif
3201                poison_obj(cachep, objp, POISON_INUSE);
3202        }
3203        if (cachep->flags & SLAB_STORE_USER)
3204                *dbg_userword(cachep, objp) = (void *)caller;
3205
3206        if (cachep->flags & SLAB_RED_ZONE) {
3207                if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3208                                *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3209                        slab_error(cachep, "double free, or memory outside"
3210                                                " object was overwritten");
3211                        printk(KERN_ERR
3212                                "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3213                                objp, *dbg_redzone1(cachep, objp),
3214                                *dbg_redzone2(cachep, objp));
3215                }
3216                *dbg_redzone1(cachep, objp) = RED_ACTIVE;
3217                *dbg_redzone2(cachep, objp) = RED_ACTIVE;
3218        }
3219#ifdef CONFIG_DEBUG_SLAB_LEAK
3220        {
3221                struct slab *slabp;
3222                unsigned objnr;
3223
3224                slabp = virt_to_head_page(objp)->slab_page;
3225                objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
3226                slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3227        }
3228#endif
3229        objp += obj_offset(cachep);
3230        if (cachep->ctor && cachep->flags & SLAB_POISON)
3231                cachep->ctor(objp);
3232        if (ARCH_SLAB_MINALIGN &&
3233            ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3234                printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3235                       objp, (int)ARCH_SLAB_MINALIGN);
3236        }
3237        return objp;
3238}
3239#else
3240#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3241#endif
3242
3243static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
3244{
3245        if (cachep == kmem_cache)
3246                return false;
3247
3248        return should_failslab(cachep->object_size, flags, cachep->flags);
3249}
3250
3251static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3252{
3253        void *objp;
3254        struct array_cache *ac;
3255        bool force_refill = false;
3256
3257        check_irq_off();
3258
3259        ac = cpu_cache_get(cachep);
3260        if (likely(ac->avail)) {
3261                ac->touched = 1;
3262                objp = ac_get_obj(cachep, ac, flags, false);
3263
3264                /*
3265                 * Allow for the possibility that none of the available
3266                 * objects is permitted by the current flags
3267                 */
3268                if (objp) {
3269                        STATS_INC_ALLOCHIT(cachep);
3270                        goto out;
3271                }
3272                force_refill = true;
3273        }
3274
3275        STATS_INC_ALLOCMISS(cachep);
3276        objp = cache_alloc_refill(cachep, flags, force_refill);
3277        /*
3278         * the 'ac' may be updated by cache_alloc_refill(),
3279         * and kmemleak_erase() requires its correct value.
3280         */
3281        ac = cpu_cache_get(cachep);
3282
3283out:
3284        /*
3285         * To avoid a false negative, if an object that is in one of the
3286         * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3287         * treat the array pointers as a reference to the object.
3288         */
3289        if (objp)
3290                kmemleak_erase(&ac->entry[ac->avail]);
3291        return objp;
3292}
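
/*
 * Sketch of the fast path above (illustrative, ignoring the pfmemalloc
 * handling inside ac_get_obj()): with interrupts off, an allocation hit
 * reduces to a LIFO pop from the per-cpu array:
 *
 *	ac = cpu_cache_get(cachep);
 *	if (ac->avail) {
 *		ac->touched = 1;
 *		objp = ac->entry[--ac->avail];	/* most recently freed object */
 *	}
 *
 * which is why the head array tends to return cache-warm objects.
 */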
3293
3294#ifdef CONFIG_NUMA
3295/*
3296 * Try allocating on another node if PF_SPREAD_SLAB or PF_MEMPOLICY is set.
3297 *
3298 * If we are in_interrupt, then process context, including cpusets and
3299 * mempolicy, may not apply and should not be used for allocation policy.
3300 */
3301static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3302{
3303        int nid_alloc, nid_here;
3304
3305        if (in_interrupt() || (flags & __GFP_THISNODE))
3306                return NULL;
3307        nid_alloc = nid_here = numa_mem_id();
3308        if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3309                nid_alloc = cpuset_slab_spread_node();
3310        else if (current->mempolicy)
3311                nid_alloc = slab_node();
3312        if (nid_alloc != nid_here)
3313                return ____cache_alloc_node(cachep, flags, nid_alloc);
3314        return NULL;
3315}
3316
3317/*
3318 * Fallback function, used when no memory is available and there are no
3319 * objects on a given node, and falling back is permitted. First we scan
3320 * all the available nodelists for free objects. If that fails, we
3321 * perform an allocation without specifying a node. This allows the page
3322 * allocator to do its reclaim / fallback magic. We then insert the
3323 * slab into the proper nodelist and allocate from it.
3324 */
3325static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3326{
3327        struct zonelist *zonelist;
3328        gfp_t local_flags;
3329        struct zoneref *z;
3330        struct zone *zone;
3331        enum zone_type high_zoneidx = gfp_zone(flags);
3332        void *obj = NULL;
3333        int nid;
3334        unsigned int cpuset_mems_cookie;
3335
3336        if (flags & __GFP_THISNODE)
3337                return NULL;
3338
3339        local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3340
3341retry_cpuset:
3342        cpuset_mems_cookie = get_mems_allowed();
3343        zonelist = node_zonelist(slab_node(), flags);
3344
3345retry:
3346        /*
3347         * Look through allowed nodes for objects available
3348         * from existing per node queues.
3349         */
3350        for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3351                nid = zone_to_nid(zone);
3352
3353                if (cpuset_zone_allowed_hardwall(zone, flags) &&
3354                        cache->nodelists[nid] &&
3355                        cache->nodelists[nid]->free_objects) {
3356                                obj = ____cache_alloc_node(cache,
3357                                        flags | GFP_THISNODE, nid);
3358                                if (obj)
3359                                        break;
3360                }
3361        }
3362
3363        if (!obj) {
3364                /*
3365                 * This allocation will be performed within the constraints
3366                 * of the current cpuset / memory policy requirements.
3367                 * We may trigger various forms of reclaim on the allowed
3368                 * set and go into memory reserves if necessary.
3369                 */
3370                if (local_flags & __GFP_WAIT)
3371                        local_irq_enable();
3372                kmem_flagcheck(cache, flags);
3373                obj = kmem_getpages(cache, local_flags, numa_mem_id());
3374                if (local_flags & __GFP_WAIT)
3375                        local_irq_disable();
3376                if (obj) {
3377                        /*
3378                         * Insert into the appropriate per node queues
3379                         */
3380                        nid = page_to_nid(virt_to_page(obj));
3381                        if (cache_grow(cache, flags, nid, obj)) {
3382                                obj = ____cache_alloc_node(cache,
3383                                        flags | GFP_THISNODE, nid);
3384                                if (!obj)
3385                                        /*
3386                                         * Another processor may allocate the
3387                                         * objects in the slab since we are
3388                                         * not holding any locks.
3389                                         */
3390                                        goto retry;
3391                        } else {
3392                                /* cache_grow already freed obj */
3393                                obj = NULL;
3394                        }
3395                }
3396        }
3397
3398        if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
3399                goto retry_cpuset;
3400        return obj;
3401}
3402
3403/*
3404 * An interface to enable slab creation on nodeid
3405 */
3406static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3407                                int nodeid)
3408{
3409        struct list_head *entry;
3410        struct slab *slabp;
3411        struct kmem_list3 *l3;
3412        void *obj;
3413        int x;
3414
3415        l3 = cachep->nodelists[nodeid];
3416        BUG_ON(!l3);
3417
3418retry:
3419        check_irq_off();
3420        spin_lock(&l3->list_lock);
3421        entry = l3->slabs_partial.next;
3422        if (entry == &l3->slabs_partial) {
3423                l3->free_touched = 1;
3424                entry = l3->slabs_free.next;
3425                if (entry == &l3->slabs_free)
3426                        goto must_grow;
3427        }
3428
3429        slabp = list_entry(entry, struct slab, list);
3430        check_spinlock_acquired_node(cachep, nodeid);
3431        check_slabp(cachep, slabp);
3432
3433        STATS_INC_NODEALLOCS(cachep);
3434        STATS_INC_ACTIVE(cachep);
3435        STATS_SET_HIGH(cachep);
3436
3437        BUG_ON(slabp->inuse == cachep->num);
3438
3439        obj = slab_get_obj(cachep, slabp, nodeid);
3440        check_slabp(cachep, slabp);
3441        l3->free_objects--;
3442        /* move slabp to the correct slab list: */
3443        list_del(&slabp->list);
3444
3445        if (slabp->free == BUFCTL_END)
3446                list_add(&slabp->list, &l3->slabs_full);
3447        else
3448                list_add(&slabp->list, &l3->slabs_partial);
3449
3450        spin_unlock(&l3->list_lock);
3451        goto done;
3452
3453must_grow:
3454        spin_unlock(&l3->list_lock);
3455        x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3456        if (x)
3457                goto retry;
3458
3459        return fallback_alloc(cachep, flags);
3460
3461done:
3462        return obj;
3463}
3464
3465/**
3466 * kmem_cache_alloc_node - Allocate an object on the specified node
3467 * @cachep: The cache to allocate from.
3468 * @flags: See kmalloc().
3469 * @nodeid: node number of the target node.
3470 * @caller: return address of caller, used for debug information
3471 *
3472 * Identical to kmem_cache_alloc but it will allocate memory on the given
3473 * node, which can improve the performance for cpu bound structures.
3474 *
3475 * Fallback to other node is possible if __GFP_THISNODE is not set.
3476 */
3477static __always_inline void *
3478slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3479                   unsigned long caller)
3480{
3481        unsigned long save_flags;
3482        void *ptr;
3483        int slab_node = numa_mem_id();
3484
3485        flags &= gfp_allowed_mask;
3486
3487        lockdep_trace_alloc(flags);
3488
3489        if (slab_should_failslab(cachep, flags))
3490                return NULL;
3491
3492        cachep = memcg_kmem_get_cache(cachep, flags);
3493
3494        cache_alloc_debugcheck_before(cachep, flags);
3495        local_irq_save(save_flags);
3496
3497        if (nodeid == NUMA_NO_NODE)
3498                nodeid = slab_node;
3499
3500        if (unlikely(!cachep->nodelists[nodeid])) {
3501                /* Node not bootstrapped yet */
3502                ptr = fallback_alloc(cachep, flags);
3503                goto out;
3504        }
3505
3506        if (nodeid == slab_node) {
3507                /*
3508                 * Use the locally cached objects if possible.
3509                 * However ____cache_alloc does not allow fallback
3510                 * to other nodes. It may fail while we still have
3511                 * objects on other nodes available.
3512                 */
3513                ptr = ____cache_alloc(cachep, flags);
3514                if (ptr)
3515                        goto out;
3516        }
3517        /* ___cache_alloc_node can fall back to other nodes */
3518        ptr = ____cache_alloc_node(cachep, flags, nodeid);
3519  out:
3520        local_irq_restore(save_flags);
3521        ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3522        kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
3523                                 flags);
3524
3525        if (likely(ptr))
3526                kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
3527
3528        if (unlikely((flags & __GFP_ZERO) && ptr))
3529                memset(ptr, 0, cachep->object_size);
3530
3531        return ptr;
3532}
3533
3534static __always_inline void *
3535__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3536{
3537        void *objp;
3538
3539        if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3540                objp = alternate_node_alloc(cache, flags);
3541                if (objp)
3542                        goto out;
3543        }
3544        objp = ____cache_alloc(cache, flags);
3545
3546        /*
3547         * We may just have run out of memory on the local node.
3548         * ____cache_alloc_node() knows how to locate memory on other nodes
3549         */
3550        if (!objp)
3551                objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3552
3553  out:
3554        return objp;
3555}
3556#else
3557
3558static __always_inline void *
3559__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3560{
3561        return ____cache_alloc(cachep, flags);
3562}
3563
3564#endif /* CONFIG_NUMA */
3565
3566static __always_inline void *
3567slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3568{
3569        unsigned long save_flags;
3570        void *objp;
3571
3572        flags &= gfp_allowed_mask;
3573
3574        lockdep_trace_alloc(flags);
3575
3576        if (slab_should_failslab(cachep, flags))
3577                return NULL;
3578
3579        cachep = memcg_kmem_get_cache(cachep, flags);
3580
3581        cache_alloc_debugcheck_before(cachep, flags);
3582        local_irq_save(save_flags);
3583        objp = __do_cache_alloc(cachep, flags);
3584        local_irq_restore(save_flags);
3585        objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3586        kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
3587                                 flags);
3588        prefetchw(objp);
3589
3590        if (likely(objp))
3591                kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
3592
3593        if (unlikely((flags & __GFP_ZERO) && objp))
3594                memset(objp, 0, cachep->object_size);
3595
3596        return objp;
3597}
3598
3599/*
3600 * Caller needs to acquire the correct kmem_list's list_lock
3601 */
3602static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3603                       int node)
3604{
3605        int i;
3606        struct kmem_list3 *l3;
3607
3608        for (i = 0; i < nr_objects; i++) {
3609                void *objp;
3610                struct slab *slabp;
3611
3612                clear_obj_pfmemalloc(&objpp[i]);
3613                objp = objpp[i];
3614
3615                slabp = virt_to_slab(objp);
3616                l3 = cachep->nodelists[node];
3617                list_del(&slabp->list);
3618                check_spinlock_acquired_node(cachep, node);
3619                check_slabp(cachep, slabp);
3620                slab_put_obj(cachep, slabp, objp, node);
3621                STATS_DEC_ACTIVE(cachep);
3622                l3->free_objects++;
3623                check_slabp(cachep, slabp);
3624
3625                /* fixup slab chains */
3626                if (slabp->inuse == 0) {
3627                        if (l3->free_objects > l3->free_limit) {
3628                                l3->free_objects -= cachep->num;
3629                                /* No need to drop any previously held
3630                                 * lock here; even if we have an off-slab
3631                                 * slab descriptor, it is guaranteed to
3632                                 * come from a different cache - refer to
3633                                 * the comments before alloc_slabmgmt.
3634                                 */
3635                                slab_destroy(cachep, slabp);
3636                        } else {
3637                                list_add(&slabp->list, &l3->slabs_free);
3638                        }
3639                } else {
3640                        /* Unconditionally move a slab to the end of the
3641                         * partial list on free - this gives the other
3642                         * objects in the slab maximum time to be freed.
3643                         */
3644                        list_add_tail(&slabp->list, &l3->slabs_partial);
3645                }
3646        }
3647}
3648
3649static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3650{
3651        int batchcount;
3652        struct kmem_list3 *l3;
3653        int node = numa_mem_id();
3654
3655        batchcount = ac->batchcount;
3656#if DEBUG
3657        BUG_ON(!batchcount || batchcount > ac->avail);
3658#endif
3659        check_irq_off();
3660        l3 = cachep->nodelists[node];
3661        spin_lock(&l3->list_lock);
3662        if (l3->shared) {
3663                struct array_cache *shared_array = l3->shared;
3664                int max = shared_array->limit - shared_array->avail;
3665                if (max) {
3666                        if (batchcount > max)
3667                                batchcount = max;
3668                        memcpy(&(shared_array->entry[shared_array->avail]),
3669                               ac->entry, sizeof(void *) * batchcount);
3670                        shared_array->avail += batchcount;
3671                        goto free_done;
3672                }
3673        }
3674
3675        free_block(cachep, ac->entry, batchcount, node);
3676free_done:
3677#if STATS
3678        {
3679                int i = 0;
3680                struct list_head *p;
3681
3682                p = l3->slabs_free.next;
3683                while (p != &(l3->slabs_free)) {
3684                        struct slab *slabp;
3685
3686                        slabp = list_entry(p, struct slab, list);
3687                        BUG_ON(slabp->inuse);
3688
3689                        i++;
3690                        p = p->next;
3691                }
3692                STATS_SET_FREEABLE(cachep, i);
3693        }
3694#endif
3695        spin_unlock(&l3->list_lock);
3696        ac->avail -= batchcount;
3697        memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3698}
3699
3700/*
3701 * Release an obj back to its cache. If the obj has a constructed state, it must
3702 * be in this state _before_ it is released.  Called with interrupts disabled.
3703 */
3704static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3705                                unsigned long caller)
3706{
3707        struct array_cache *ac = cpu_cache_get(cachep);
3708
3709        check_irq_off();
3710        kmemleak_free_recursive(objp, cachep->flags);
3711        objp = cache_free_debugcheck(cachep, objp, caller);
3712
3713        kmemcheck_slab_free(cachep, objp, cachep->object_size);
3714
3715        /*
3716         * Skip calling cache_free_alien() when the platform is not NUMA.
3717         * This avoids the cache misses incurred by accessing slabp (a
3718         * per-page memory reference) to get the nodeid. Instead we test a
3719         * global variable, which is most likely already present in the
3720         * cache.
3721         */
3722        if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3723                return;
3724
3725        if (likely(ac->avail < ac->limit)) {
3726                STATS_INC_FREEHIT(cachep);
3727        } else {
3728                STATS_INC_FREEMISS(cachep);
3729                cache_flusharray(cachep, ac);
3730        }
3731
3732        ac_put_obj(cachep, ac, objp);
3733}
3734
3735/**
3736 * kmem_cache_alloc - Allocate an object
3737 * @cachep: The cache to allocate from.
3738 * @flags: See kmalloc().
3739 *
3740 * Allocate an object from this cache.  The flags are only relevant
3741 * if the cache has no available objects.
3742 */
3743void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3744{
3745        void *ret = slab_alloc(cachep, flags, _RET_IP_);
3746
3747        trace_kmem_cache_alloc(_RET_IP_, ret,
3748                               cachep->object_size, cachep->size, flags);
3749
3750        return ret;
3751}
3752EXPORT_SYMBOL(kmem_cache_alloc);
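
/*
 * Example usage (illustrative only; 'struct foo' and 'foo_cache' are
 * hypothetical):
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (foo_cache) {
 *		struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *
 *		if (f)
 *			kmem_cache_free(foo_cache, f);
 *	}
 */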
3753
3754#ifdef CONFIG_TRACING
3755void *
3756kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3757{
3758        void *ret;
3759
3760        ret = slab_alloc(cachep, flags, _RET_IP_);
3761
3762        trace_kmalloc(_RET_IP_, ret,
3763                      size, cachep->size, flags);
3764        return ret;
3765}
3766EXPORT_SYMBOL(kmem_cache_alloc_trace);
3767#endif
3768
3769#ifdef CONFIG_NUMA
3770void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3771{
3772        void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3773
3774        trace_kmem_cache_alloc_node(_RET_IP_, ret,
3775                                    cachep->object_size, cachep->size,
3776                                    flags, nodeid);
3777
3778        return ret;
3779}
3780EXPORT_SYMBOL(kmem_cache_alloc_node);
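
/*
 * Example (illustrative, reusing the hypothetical 'foo_cache' above):
 * per-node control data is best allocated on the node that will touch
 * it most; passing NUMA_NO_NODE instead selects the local node:
 *
 *	struct foo *f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL,
 *					      cpu_to_node(cpu));
 */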
3781
3782#ifdef CONFIG_TRACING
3783void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3784                                  gfp_t flags,
3785                                  int nodeid,
3786                                  size_t size)
3787{
3788        void *ret;
3789
3790        ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3791
3792        trace_kmalloc_node(_RET_IP_, ret,
3793                           size, cachep->size,
3794                           flags, nodeid);
3795        return ret;
3796}
3797EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3798#endif
3799
3800static __always_inline void *
3801__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3802{
3803        struct kmem_cache *cachep;
3804
3805        cachep = kmem_find_general_cachep(size, flags);
3806        if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3807                return cachep;
3808        return kmem_cache_alloc_node_trace(cachep, flags, node, size);
3809}
3810
3811#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3812void *__kmalloc_node(size_t size, gfp_t flags, int node)
3813{
3814        return __do_kmalloc_node(size, flags, node, _RET_IP_);
3815}
3816EXPORT_SYMBOL(__kmalloc_node);
3817
3818void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3819                int node, unsigned long caller)
3820{
3821        return __do_kmalloc_node(size, flags, node, caller);
3822}
3823EXPORT_SYMBOL(__kmalloc_node_track_caller);
3824#else
3825void *__kmalloc_node(size_t size, gfp_t flags, int node)
3826{
3827        return __do_kmalloc_node(size, flags, node, 0);
3828}
3829EXPORT_SYMBOL(__kmalloc_node);
3830#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
3831#endif /* CONFIG_NUMA */
3832
3833/**
3834 * __do_kmalloc - allocate memory
3835 * @size: how many bytes of memory are required.
3836 * @flags: the type of memory to allocate (see kmalloc).
3837 * @caller: function caller for debug tracking of the caller
3838 */
3839static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3840                                          unsigned long caller)
3841{
3842        struct kmem_cache *cachep;
3843        void *ret;
3844
3845        /* If you want to save a few bytes of .text space: replace
3846         * __ with kmem_.
3847         * Then kmalloc uses the uninlined functions instead of the inline
3848         * ones.
3849         */
3850        cachep = __find_general_cachep(size, flags);
3851        if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3852                return cachep;
3853        ret = slab_alloc(cachep, flags, caller);
3854
3855        trace_kmalloc(caller, ret,
3856                      size, cachep->size, flags);
3857
3858        return ret;
3859}
3860
3861
3862#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3863void *__kmalloc(size_t size, gfp_t flags)
3864{
3865        return __do_kmalloc(size, flags, _RET_IP_);
3866}
3867EXPORT_SYMBOL(__kmalloc);
3868
3869void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3870{
3871        return __do_kmalloc(size, flags, caller);
3872}
3873EXPORT_SYMBOL(__kmalloc_track_caller);
3874
3875#else
3876void *__kmalloc(size_t size, gfp_t flags)
3877{
3878        return __do_kmalloc(size, flags, 0);
3879}
3880EXPORT_SYMBOL(__kmalloc);
3881#endif
3882
3883/**
3884 * kmem_cache_free - Deallocate an object
3885 * @cachep: The cache the allocation was from.
3886 * @objp: The previously allocated object.
3887 *
3888 * Free an object which was previously allocated from this
3889 * cache.
3890 */
3891void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3892{
3893        unsigned long flags;
3894        cachep = cache_from_obj(cachep, objp);
3895        if (!cachep)
3896                return;
3897
3898        local_irq_save(flags);
3899        debug_check_no_locks_freed(objp, cachep->object_size);
3900        if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3901                debug_check_no_obj_freed(objp, cachep->object_size);
3902        __cache_free(cachep, objp, _RET_IP_);
3903        local_irq_restore(flags);
3904
3905        trace_kmem_cache_free(_RET_IP_, objp);
3906}
3907EXPORT_SYMBOL(kmem_cache_free);
3908
3909/**
3910 * kfree - free previously allocated memory
3911 * @objp: pointer returned by kmalloc.
3912 *
3913 * If @objp is NULL, no operation is performed.
3914 *
3915 * Don't free memory not originally allocated by kmalloc()
3916 * or you will run into trouble.
3917 */
3918void kfree(const void *objp)
3919{
3920        struct kmem_cache *c;
3921        unsigned long flags;
3922
3923        trace_kfree(_RET_IP_, objp);
3924
3925        if (unlikely(ZERO_OR_NULL_PTR(objp)))
3926                return;
3927        local_irq_save(flags);
3928        kfree_debugcheck(objp);
3929        c = virt_to_cache(objp);
3930        debug_check_no_locks_freed(objp, c->object_size);
3931
3932        debug_check_no_obj_freed(objp, c->object_size);
3933        __cache_free(c, (void *)objp, _RET_IP_);
3934        local_irq_restore(flags);
3935}
3936EXPORT_SYMBOL(kfree);
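
/*
 * Example (illustrative): kmalloc() rounds a request up to the nearest
 * general cache size, so the usable size reported by ksize() may exceed
 * the requested size:
 *
 *	char *buf = kmalloc(100, GFP_KERNEL);	/* typically kmalloc-128 */
 *
 *	if (buf) {
 *		size_t usable = ksize(buf);	/* e.g. 128 */
 *		...
 *		kfree(buf);
 *	}
 */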
3937
3938/*
3939 * This initializes kmem_list3 or resizes various caches for all nodes.
3940 */
3941static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
3942{
3943        int node;
3944        struct kmem_list3 *l3;
3945        struct array_cache *new_shared;
3946        struct array_cache **new_alien = NULL;
3947
3948        for_each_online_node(node) {
3949
3950                if (use_alien_caches) {
3951                        new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3952                        if (!new_alien)
3953                                goto fail;
3954                }
3955
3956                new_shared = NULL;
3957                if (cachep->shared) {
3958                        new_shared = alloc_arraycache(node,
3959                                cachep->shared*cachep->batchcount,
3960                                        0xbaadf00d, gfp);
3961                        if (!new_shared) {
3962                                free_alien_cache(new_alien);
3963                                goto fail;
3964                        }
3965                }
3966
3967                l3 = cachep->nodelists[node];
3968                if (l3) {
3969                        struct array_cache *shared = l3->shared;
3970
3971                        spin_lock_irq(&l3->list_lock);
3972
3973                        if (shared)
3974                                free_block(cachep, shared->entry,
3975                                                shared->avail, node);
3976
3977                        l3->shared = new_shared;
3978                        if (!l3->alien) {
3979                                l3->alien = new_alien;
3980                                new_alien = NULL;
3981                        }
3982                        l3->free_limit = (1 + nr_cpus_node(node)) *
3983                                        cachep->batchcount + cachep->num;
3984                        spin_unlock_irq(&l3->list_lock);
3985                        kfree(shared);
3986                        free_alien_cache(new_alien);
3987                        continue;
3988                }
3989                l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
3990                if (!l3) {
3991                        free_alien_cache(new_alien);
3992                        kfree(new_shared);
3993                        goto fail;
3994                }
3995
3996                kmem_list3_init(l3);
3997                l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
3998                                ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
3999                l3->shared = new_shared;
4000                l3->alien = new_alien;
4001                l3->free_limit = (1 + nr_cpus_node(node)) *
4002                                        cachep->batchcount + cachep->num;
4003                cachep->nodelists[node] = l3;
4004        }
4005        return 0;
4006
4007fail:
4008        if (!cachep->list.next) {
4009                /* Cache is not active yet. Roll back what we did */
4010                node--;
4011                while (node >= 0) {
4012                        if (cachep->nodelists[node]) {
4013                                l3 = cachep->nodelists[node];
4014
4015                                kfree(l3->shared);
4016                                free_alien_cache(l3->alien);
4017                                kfree(l3);
4018                                cachep->nodelists[node] = NULL;
4019                        }
4020                        node--;
4021                }
4022        }
4023        return -ENOMEM;
4024}
4025
4026struct ccupdate_struct {
4027        struct kmem_cache *cachep;
4028        struct array_cache *new[0];
4029};
4030
4031static void do_ccupdate_local(void *info)
4032{
4033        struct ccupdate_struct *new = info;
4034        struct array_cache *old;
4035
4036        check_irq_off();
4037        old = cpu_cache_get(new->cachep);
4038
4039        new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
4040        new->new[smp_processor_id()] = old;
4041}
4042
4043/* Always called with the slab_mutex held */
4044static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
4045                                int batchcount, int shared, gfp_t gfp)
4046{
4047        struct ccupdate_struct *new;
4048        int i;
4049
4050        new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
4051                      gfp);
4052        if (!new)
4053                return -ENOMEM;
4054
4055        for_each_online_cpu(i) {
4056                new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
4057                                                batchcount, gfp);
4058                if (!new->new[i]) {
4059                        for (i--; i >= 0; i--)
4060                                kfree(new->new[i]);
4061                        kfree(new);
4062                        return -ENOMEM;
4063                }
4064        }
4065        new->cachep = cachep;
4066
4067        on_each_cpu(do_ccupdate_local, (void *)new, 1);
4068
4069        check_irq_on();
4070        cachep->batchcount = batchcount;
4071        cachep->limit = limit;
4072        cachep->shared = shared;
4073
4074        for_each_online_cpu(i) {
4075                struct array_cache *ccold = new->new[i];
4076                if (!ccold)
4077                        continue;
4078                spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
4079                free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
4080                spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
4081                kfree(ccold);
4082        }
4083        kfree(new);
4084        return alloc_kmemlist(cachep, gfp);
4085}
4086
4087static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
4088                                int batchcount, int shared, gfp_t gfp)
4089{
4090        int ret;
4091        struct kmem_cache *c = NULL;
4092        int i = 0;
4093
4094        ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
4095
4096        if (slab_state < FULL)
4097                return ret;
4098
4099        if ((ret < 0) || !is_root_cache(cachep))
4100                return ret;
4101
4102        VM_BUG_ON(!mutex_is_locked(&slab_mutex));
4103        for_each_memcg_cache_index(i) {
4104                c = cache_from_memcg(cachep, i);
4105                if (c)
4106                        /* return value determined by the parent cache only */
4107                        __do_tune_cpucache(c, limit, batchcount, shared, gfp);
4108        }
4109
4110        return ret;
4111}
4112
4113/* Always called with the slab_mutex held */
4114static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
4115{
4116        int err;
4117        int limit = 0;
4118        int shared = 0;
4119        int batchcount = 0;
4120
4121        if (!is_root_cache(cachep)) {
4122                struct kmem_cache *root = memcg_root_cache(cachep);
4123                limit = root->limit;
4124                shared = root->shared;
4125                batchcount = root->batchcount;
4126        }
4127
4128        if (limit && shared && batchcount)
4129                goto skip_setup;
4130        /*
4131         * The head array serves three purposes:
4132         * - create a LIFO ordering, i.e. return objects that are cache-warm
4133         * - reduce the number of spinlock operations.
4134         * - reduce the number of linked list operations on the slab and
4135         *   bufctl chains: array operations are cheaper.
4136         * The numbers are guessed, we should auto-tune as described by
4137         * Bonwick.
4138         */
4139        if (cachep->size > 131072)
4140                limit = 1;
4141        else if (cachep->size > PAGE_SIZE)
4142                limit = 8;
4143        else if (cachep->size > 1024)
4144                limit = 24;
4145        else if (cachep->size > 256)
4146                limit = 54;
4147        else
4148                limit = 120;
4149
4150        /*
4151         * CPU bound tasks (e.g. network routing) can exhibit cpu bound
4152         * allocation behaviour: Most allocs on one cpu, most free operations
4153         * on another cpu. For these cases, an efficient object passing between
4154         * cpus is necessary. This is provided by a shared array. The array
4155         * replaces Bonwick's magazine layer.
4156         * On uniprocessor, it's functionally equivalent (but less efficient)
4157         * to a larger limit. Thus disabled by default.
4158         */
4159        shared = 0;
4160        if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
4161                shared = 8;
4162
4163#if DEBUG
4164        /*
4165         * With debugging enabled, a large batchcount leads to excessively
4166         * long periods with local interrupts disabled. Limit the batchcount.
4167         */
4168        if (limit > 32)
4169                limit = 32;
4170#endif
4171        batchcount = (limit + 1) / 2;
4172skip_setup:
4173        err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
4174        if (err)
4175                printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
4176                       cachep->name, -err);
4177        return err;
4178}
4179
4180/*
4181 * Drain an array if it contains any elements, taking the l3 lock only if
4182 * necessary. Note that the l3 list_lock also protects the array_cache
4183 * if drain_array() is used on the shared array.
4184 */
4185static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4186                         struct array_cache *ac, int force, int node)
4187{
4188        int tofree;
4189
4190        if (!ac || !ac->avail)
4191                return;
4192        if (ac->touched && !force) {
4193                ac->touched = 0;
4194        } else {
4195                spin_lock_irq(&l3->list_lock);
4196                if (ac->avail) {
4197                        tofree = force ? ac->avail : (ac->limit + 4) / 5;
4198                        if (tofree > ac->avail)
4199                                tofree = (ac->avail + 1) / 2;
4200                        free_block(cachep, ac->entry, tofree, node);
4201                        ac->avail -= tofree;
4202                        memmove(ac->entry, &(ac->entry[tofree]),
4203                                sizeof(void *) * ac->avail);
4204                }
4205                spin_unlock_irq(&l3->list_lock);
4206        }
4207}
4208
4209/**
4210 * cache_reap - Reclaim memory from caches.
4211 * @w: work descriptor
4212 *
4213 * Called from workqueue/eventd every few seconds.
4214 * Purpose:
4215 * - clear the per-cpu caches for this CPU.
4216 * - return freeable pages to the main free memory pool.
4217 *
4218 * If we cannot acquire the cache chain mutex then just give up - we'll try
4219 * again on the next iteration.
4220 */
4221static void cache_reap(struct work_struct *w)
4222{
4223        struct kmem_cache *searchp;
4224        struct kmem_list3 *l3;
4225        int node = numa_mem_id();
4226        struct delayed_work *work = to_delayed_work(w);
4227
4228        if (!mutex_trylock(&slab_mutex))
4229                /* Give up. Set up the next iteration. */
4230                goto out;
4231
4232        list_for_each_entry(searchp, &slab_caches, list) {
4233                check_irq_on();
4234
4235                /*
4236                 * We only take the l3 lock if absolutely necessary and we
4237                 * have established with reasonable certainty that
4238                 * we can do some work if the lock was obtained.
4239                 */
4240                l3 = searchp->nodelists[node];
4241
4242                reap_alien(searchp, l3);
4243
4244                drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
4245
4246                /*
4247                 * These are racy checks but it does not matter
4248                 * if we skip one check or scan twice.
4249                 */
4250                if (time_after(l3->next_reap, jiffies))
4251                        goto next;
4252
4253                l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
4254
4255                drain_array(searchp, l3, l3->shared, 0, node);
4256
4257                if (l3->free_touched)
4258                        l3->free_touched = 0;
4259                else {
4260                        int freed;
4261
4262                        freed = drain_freelist(searchp, l3, (l3->free_limit +
4263                                5 * searchp->num - 1) / (5 * searchp->num));
4264                        STATS_ADD_REAPED(searchp, freed);
4265                }
4266next:
4267                cond_resched();
4268        }
4269        check_irq_on();
4270        mutex_unlock(&slab_mutex);
4271        next_reap_node();
4272out:
4273        /* Set up the next iteration */
4274        schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
4275}
4276
4277#ifdef CONFIG_SLABINFO
4278void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4279{
4280        struct slab *slabp;
4281        unsigned long active_objs;
4282        unsigned long num_objs;
4283        unsigned long active_slabs = 0;
4284        unsigned long num_slabs, free_objects = 0, shared_avail = 0;
4285        const char *name;
4286        char *error = NULL;
4287        int node;
4288        struct kmem_list3 *l3;
4289
4290        active_objs = 0;
4291        num_slabs = 0;
4292        for_each_online_node(node) {
4293                l3 = cachep->nodelists[node];
4294                if (!l3)
4295                        continue;
4296
4297                check_irq_on();
4298                spin_lock_irq(&l3->list_lock);
4299
4300                list_for_each_entry(slabp, &l3->slabs_full, list) {
4301                        if (slabp->inuse != cachep->num && !error)
4302                                error = "slabs_full accounting error";
4303                        active_objs += cachep->num;
4304                        active_slabs++;
4305                }
4306                list_for_each_entry(slabp, &l3->slabs_partial, list) {
4307                        if (slabp->inuse == cachep->num && !error)
4308                                error = "slabs_partial inuse accounting error";
4309                        if (!slabp->inuse && !error)
4310                                error = "slabs_partial/inuse accounting error";
4311                        active_objs += slabp->inuse;
4312                        active_slabs++;
4313                }
4314                list_for_each_entry(slabp, &l3->slabs_free, list) {
4315                        if (slabp->inuse && !error)
4316                                error = "slabs_free/inuse accounting error";
4317                        num_slabs++;
4318                }
4319                free_objects += l3->free_objects;
4320                if (l3->shared)
4321                        shared_avail += l3->shared->avail;
4322
4323                spin_unlock_irq(&l3->list_lock);
4324        }
4325        num_slabs += active_slabs;
4326        num_objs = num_slabs * cachep->num;
4327        if (num_objs - active_objs != free_objects && !error)
4328                error = "free_objects accounting error";
4329
4330        name = cachep->name;
4331        if (error)
4332                printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4333
4334        sinfo->active_objs = active_objs;
4335        sinfo->num_objs = num_objs;
4336        sinfo->active_slabs = active_slabs;
4337        sinfo->num_slabs = num_slabs;
4338        sinfo->shared_avail = shared_avail;
4339        sinfo->limit = cachep->limit;
4340        sinfo->batchcount = cachep->batchcount;
4341        sinfo->shared = cachep->shared;
4342        sinfo->objects_per_slab = cachep->num;
4343        sinfo->cache_order = cachep->gfporder;
4344}
4345
4346void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4347{
4348#if STATS
4349        {                       /* list3 stats */
4350                unsigned long high = cachep->high_mark;
4351                unsigned long allocs = cachep->num_allocations;
4352                unsigned long grown = cachep->grown;
4353                unsigned long reaped = cachep->reaped;
4354                unsigned long errors = cachep->errors;
4355                unsigned long max_freeable = cachep->max_freeable;
4356                unsigned long node_allocs = cachep->node_allocs;
4357                unsigned long node_frees = cachep->node_frees;
4358                unsigned long overflows = cachep->node_overflow;
4359
4360                seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4361                           "%4lu %4lu %4lu %4lu %4lu",
4362                           allocs, high, grown,
4363                           reaped, errors, max_freeable, node_allocs,
4364                           node_frees, overflows);
4365        }
4366        /* cpu stats */
4367        {
4368                unsigned long allochit = atomic_read(&cachep->allochit);
4369                unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4370                unsigned long freehit = atomic_read(&cachep->freehit);
4371                unsigned long freemiss = atomic_read(&cachep->freemiss);
4372
4373                seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4374                           allochit, allocmiss, freehit, freemiss);
4375        }
4376#endif
4377}
4378
4379#define MAX_SLABINFO_WRITE 128
4380/**
4381 * slabinfo_write - Tuning for the slab allocator
4382 * @file: unused
4383 * @buffer: user buffer
4384 * @count: data length
4385 * @ppos: unused
4386 */
4387ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4388                       size_t count, loff_t *ppos)
4389{
4390        char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4391        int limit, batchcount, shared, res;
4392        struct kmem_cache *cachep;
4393
4394        if (count > MAX_SLABINFO_WRITE)
4395                return -EINVAL;
4396        if (copy_from_user(&kbuf, buffer, count))
4397                return -EFAULT;
4398        kbuf[MAX_SLABINFO_WRITE] = '\0';
4399
4400        tmp = strchr(kbuf, ' ');
4401        if (!tmp)
4402                return -EINVAL;
4403        *tmp = '\0';
4404        tmp++;
4405        if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4406                return -EINVAL;
4407
4408        /* Find the cache in the chain of caches. */
4409        mutex_lock(&slab_mutex);
4410        res = -EINVAL;
4411        list_for_each_entry(cachep, &slab_caches, list) {
4412                if (!strcmp(cachep->name, kbuf)) {
4413                        if (limit < 1 || batchcount < 1 ||
4414                                        batchcount > limit || shared < 0) {
4415                                res = 0;
4416                        } else {
4417                                res = do_tune_cpucache(cachep, limit,
4418                                                       batchcount, shared,
4419                                                       GFP_KERNEL);
4420                        }
4421                        break;
4422                }
4423        }
4424        mutex_unlock(&slab_mutex);
4425        if (res >= 0)
4426                res = count;
4427        return res;
4428}
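
/*
 * Example (illustrative): the parser above expects a cache name followed
 * by three integers - limit, batchcount and shared - so the tunables can
 * be set from user space with e.g.:
 *
 *	# echo 'dentry 120 60 8' > /proc/slabinfo
 */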
4429
4430#ifdef CONFIG_DEBUG_SLAB_LEAK
4431
4432static void *leaks_start(struct seq_file *m, loff_t *pos)
4433{
4434        mutex_lock(&slab_mutex);
4435        return seq_list_start(&slab_caches, *pos);
4436}
4437
4438static inline int add_caller(unsigned long *n, unsigned long v)
4439{
4440        unsigned long *p;
4441        int l;
4442        if (!v)
4443                return 1;
4444        l = n[1];
4445        p = n + 2;
4446        while (l) {
4447                int i = l/2;
4448                unsigned long *q = p + 2 * i;
4449                if (*q == v) {
4450                        q[1]++;
4451                        return 1;
4452                }
4453                if (*q > v) {
4454                        l = i;
4455                } else {
4456                        p = q + 2;
4457                        l -= i + 1;
4458                }
4459        }
4460        if (++n[1] == n[0])
4461                return 0;
4462        memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4463        p[0] = v;
4464        p[1] = 1;
4465        return 1;
4466}
4467
4468static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4469{
4470        void *p;
4471        int i;
4472        if (n[0] == n[1])
4473                return;
4474        for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
4475                if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4476                        continue;
4477                if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4478                        return;
4479        }
4480}
4481
4482static void show_symbol(struct seq_file *m, unsigned long address)
4483{
4484#ifdef CONFIG_KALLSYMS
4485        unsigned long offset, size;
4486        char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4487
4488        if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4489                seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4490                if (modname[0])
4491                        seq_printf(m, " [%s]", modname);
4492                return;
4493        }
4494#endif
4495        seq_printf(m, "%p", (void *)address);
4496}

static int leaks_show(struct seq_file *m, void *p)
{
        struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
        struct slab *slabp;
        struct kmem_list3 *l3;
        const char *name;
        unsigned long *n = m->private;
        int node;
        int i;

        if (!(cachep->flags & SLAB_STORE_USER))
                return 0;
        if (!(cachep->flags & SLAB_RED_ZONE))
                return 0;

        /*
         * Both debug flags are set, so every active object has its
         * allocating caller recorded and we can build the table.
         */

        n[1] = 0;

        for_each_online_node(node) {
                l3 = cachep->nodelists[node];
                if (!l3)
                        continue;

                check_irq_on();
                spin_lock_irq(&l3->list_lock);

                list_for_each_entry(slabp, &l3->slabs_full, list)
                        handle_slab(n, cachep, slabp);
                list_for_each_entry(slabp, &l3->slabs_partial, list)
                        handle_slab(n, cachep, slabp);
                spin_unlock_irq(&l3->list_lock);
        }
        name = cachep->name;
        if (n[0] == n[1]) {
                /* The table overflowed: retry with one twice as large. */
                mutex_unlock(&slab_mutex);
                m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
                if (!m->private) {
                        /* Really out of memory: keep the old table and bail. */
                        m->private = n;
                        mutex_lock(&slab_mutex);
                        return -ENOMEM;
                }
                *(unsigned long *)m->private = n[0] * 2;
                kfree(n);
                mutex_lock(&slab_mutex);
                /* Make seq_read() retry this entry with the bigger table. */
                m->count = m->size;
                return 0;
        }
        for (i = 0; i < n[1]; i++) {
                seq_printf(m, "%s: %lu ", name, n[2*i+3]);
                show_symbol(m, n[2*i+2]);
                seq_putc(m, '\n');
        }

        return 0;
}
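
/*
 * For reference (illustrative values, hypothetical symbols): the resulting
 * /proc/slab_allocators output is one line per (cache, caller) pair, of the
 * form "<cache>: <live object count> <symbol>+<offset>/<size> [module]":
 *
 *      dentry: 1460 d_alloc+0x23/0x1a0
 *      size-64: 12 scsi_probe_lun+0x8a/0x2d0 [scsi_mod]
 */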

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
        return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&slab_mutex);
}

static const struct seq_operations slabstats_op = {
        .start = leaks_start,
        .next = s_next,
        .stop = s_stop,
        .show = leaks_show,
};

static int slabstats_open(struct inode *inode, struct file *file)
{
        unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
        int ret = -ENOMEM;

        if (n) {
                ret = seq_open(file, &slabstats_op);
                if (!ret) {
                        struct seq_file *m = file->private_data;

                        /* n[0] = capacity: one page holds that many pairs. */
                        *n = PAGE_SIZE / (2 * sizeof(unsigned long));
                        m->private = n;
                        n = NULL;
                }
                kfree(n);
        }
        return ret;
}

static const struct file_operations proc_slabstats_operations = {
        .open           = slabstats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};
#endif

static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
        proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
        return 0;
}
module_init(slab_proc_init);
#endif

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the call.
 */
size_t ksize(const void *objp)
{
        BUG_ON(!objp);
        if (unlikely(objp == ZERO_SIZE_PTR))
                return 0;

        return virt_to_cache(objp)->object_size;
}
EXPORT_SYMBOL(ksize);

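/*
 * Illustrative sketch (an assumption, not part of slab.c): a caller can use
 * ksize() to exploit the slack space described above and skip a reallocation
 * when the existing object is already big enough.  grow_buffer() is a
 * hypothetical helper name.
 */
#if 0	/* example only */
static void *grow_buffer(void *buf, size_t new_len)
{
        /* Reuse the object if the rounded-up allocation already fits. */
        if (buf && new_len <= ksize(buf))
                return buf;
        return krealloc(buf, new_len, GFP_KERNEL);
}
#endif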