linux/include/linux/slab.h
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *      Cleaned up and restructured to ease the addition of alternative
 *      implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_CONSISTENCY_CHECKS 0x00000100UL    /* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE           0x00000400UL    /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON             0x00000800UL    /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN      0x00002000UL    /* Align objs on cache lines */
#define SLAB_CACHE_DMA          0x00004000UL    /* Use GFP_DMA memory */
#define SLAB_STORE_USER         0x00010000UL    /* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC              0x00040000UL    /* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * but only if we can be sure that the memory has not meanwhile been reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * Take rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
#define SLAB_DESTROY_BY_RCU     0x00080000UL    /* Defer freeing slabs to RCU */
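
/*
 * A hedged sketch of a cache that opts into the lifetime rules above;
 * the cache name "obj" and struct obj are illustrative only, not part
 * of this header:
 *
 *  obj_cache = kmem_cache_create("obj", sizeof(struct obj), 0,
 *                                SLAB_DESTROY_BY_RCU, NULL);
 *
 * Objects taken from such a cache must then be validated after lookup,
 * exactly as in the pattern shown above.
 */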
#define SLAB_MEM_SPREAD         0x00100000UL    /* Spread some memory over cpuset */
#define SLAB_TRACE              0x00200000UL    /* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS     0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS     0x00000000UL
#endif

#define SLAB_NOLEAKTRACE        0x00800000UL    /* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK           0x01000000UL
#else
# define SLAB_NOTRACK           0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB          0x02000000UL    /* Fault injection mark */
#else
# define SLAB_FAILSLAB          0x00000000UL
#endif
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
# define SLAB_ACCOUNT           0x04000000UL    /* Account to memcg */
#else
# define SLAB_ACCOUNT           0x00000000UL
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN              0x08000000UL
#else
#define SLAB_KASAN              0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT    0x00020000UL            /* Objects are reclaimable */
#define SLAB_TEMPORARY          SLAB_RECLAIM_ACCOUNT    /* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
                                (unsigned long)ZERO_SIZE_PTR)
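
/*
 * A minimal illustration of the above (hypothetical caller, not part of
 * this header):
 *
 *  void *p = kmalloc(0, GFP_KERNEL);  // returns ZERO_SIZE_PTR
 *  BUG_ON(!ZERO_OR_NULL_PTR(p));      // true for ZERO_SIZE_PTR and NULL
 *  kfree(p);                          // no-op, exactly as kfree(NULL)
 */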

#include <linux/kmemleak.h>
#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
                        unsigned long,
                        void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
                sizeof(struct __struct), __alignof__(struct __struct),\
                (__flags), NULL)
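
/*
 * For example (a hypothetical struct, shown only to illustrate the macro):
 *
 *  struct foo {
 *      int bar;
 *  } ____cacheline_aligned_in_smp;
 *
 *  static struct kmem_cache *foo_cache;
 *
 *  foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *
 * which expands to kmem_cache_create("foo", sizeof(struct foo),
 * __alignof__(struct foo), SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL).
 */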

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
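
/*
 * Typical krealloc() usage, sketched; on failure the original buffer is
 * left untouched and must still be freed by the caller:
 *
 *  new = krealloc(buf, new_len, GFP_KERNEL);
 *  if (!new)
 *      kfree(buf);   // krealloc() failed, old buffer still allocated
 *  else
 *      buf = new;
 */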

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH      ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
                                (MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX       KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW       5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH      (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX       (MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW       3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH      PAGE_SHIFT
#define KMALLOC_SHIFT_MAX       30
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW       3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE        (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE  (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER       (KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte-sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte-sized index which can represent 2^8 entries, the size of the
 * object must be greater than or equal to 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up using the byte-sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)
#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline int kmalloc_index(size_t size)
{
        if (!size)
                return 0;

        if (size <= KMALLOC_MIN_SIZE)
                return KMALLOC_SHIFT_LOW;

        if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
                return 1;
        if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
                return 2;
        if (size <=          8) return 3;
        if (size <=         16) return 4;
        if (size <=         32) return 5;
        if (size <=         64) return 6;
        if (size <=        128) return 7;
        if (size <=        256) return 8;
        if (size <=        512) return 9;
        if (size <=       1024) return 10;
        if (size <=   2 * 1024) return 11;
        if (size <=   4 * 1024) return 12;
        if (size <=   8 * 1024) return 13;
        if (size <=  16 * 1024) return 14;
        if (size <=  32 * 1024) return 15;
        if (size <=  64 * 1024) return 16;
        if (size <= 128 * 1024) return 17;
        if (size <= 256 * 1024) return 18;
        if (size <= 512 * 1024) return 19;
        if (size <= 1024 * 1024) return 20;
        if (size <=  2 * 1024 * 1024) return 21;
        if (size <=  4 * 1024 * 1024) return 22;
        if (size <=  8 * 1024 * 1024) return 23;
        if (size <=  16 * 1024 * 1024) return 24;
        if (size <=  32 * 1024 * 1024) return 25;
        if (size <=  64 * 1024 * 1024) return 26;
        BUG();

        /* Will never be reached. Needed because the compiler may complain */
        return -1;
}
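
/*
 * For instance, assuming KMALLOC_MIN_SIZE == 8: kmalloc_index(100)
 * returns 7 (the 128-byte cache), while kmalloc_index(96) returns 1,
 * selecting the special 96-byte cache.
 */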
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
        kmem_cache_free_bulk(NULL, size, p);
}
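
/*
 * A short bulk-allocation sketch ("cache" and the array size are
 * illustrative); kmem_cache_alloc_bulk() returns nonzero on success
 * and 0 on failure:
 *
 *  void *objs[16];
 *
 *  if (kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *      // ... use the objects ...
 *      kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
 *  }
 */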

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
        return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
        return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                           gfp_t gfpflags,
                                           int node, size_t size) __assume_slab_alignment;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
                              gfp_t gfpflags,
                              int node, size_t size)
{
        return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
                gfp_t flags, size_t size)
{
        void *ret = kmem_cache_alloc(s, flags);

        kasan_kmalloc(s, ret, size, flags);
        return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
                              gfp_t gfpflags,
                              int node, size_t size)
{
        void *ret = kmem_cache_alloc_node(s, gfpflags, node);

        kasan_kmalloc(s, ret, size, gfpflags);
        return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
        return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
        unsigned int order = get_order(size);

        return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * It is also possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
        if (__builtin_constant_p(size)) {
                if (size > KMALLOC_MAX_CACHE_SIZE)
                        return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
                if (!(flags & GFP_DMA)) {
                        int index = kmalloc_index(size);

                        if (!index)
                                return ZERO_SIZE_PTR;

                        return kmem_cache_alloc_trace(kmalloc_caches[index],
                                        flags, size);
                }
#endif
        }
        return __kmalloc(size, flags);
}
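
/*
 * The canonical call pattern, sketched with a hypothetical struct:
 *
 *  struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *  if (!f)
 *      return -ENOMEM;
 */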

/*
 * Determine the size of the Nth kmalloc cache.
 * Returns the size, or 0 if a kmalloc cache of that
 * index does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
        if (n > 2)
                return 1 << n;

        if (n == 1 && KMALLOC_MIN_SIZE <= 32)
                return 96;

        if (n == 2 && KMALLOC_MIN_SIZE <= 64)
                return 192;
#endif
        return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
        if (__builtin_constant_p(size) &&
                size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
                int i = kmalloc_index(size);

                if (!i)
                        return ZERO_SIZE_PTR;

                return kmem_cache_alloc_node_trace(kmalloc_caches[i],
                                                flags, node, size);
        }
#endif
        return __kmalloc_node(size, flags, node);
}

struct memcg_cache_array {
        struct rcu_head rcu;
        struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @root_cache: pointer to the global, root cache, this cache was derived from
 *
 * Both root and child caches of the same kind are linked into a list chained
 * through @list.
 */
struct memcg_cache_params {
        bool is_root_cache;
        struct list_head list;
        union {
                struct memcg_cache_array __rcu *memcg_caches;
                struct {
                        struct mem_cgroup *memcg;
                        struct kmem_cache *root_cache;
                };
        };
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
        if (size != 0 && n > SIZE_MAX / size)
                return NULL;
        return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
        return kmalloc_array(n, size, flags | __GFP_ZERO);
}
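
/*
 * E.g., allocating a zeroed array of n entries (names illustrative);
 * the multiplication overflow check happens inside kmalloc_array():
 *
 *  entries = kcalloc(n, sizeof(*entries), GFP_KERNEL);
 *  if (!entries)
 *      return -ENOMEM;
 */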

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * caller of the routine that calls it, rather than its own direct caller,
 * for slab leak tracking (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
        __kmalloc_track_caller(size, flags, _RET_IP_)

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
        __kmalloc_node_track_caller(size, flags, node, \
                        _RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
        kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */


/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
        return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
        return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
        return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#endif  /* _LINUX_SLAB_H */