linux/include/linux/slab.h
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *      Cleaned up and restructured to ease the addition of alternative
 *      implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE         0x00000100UL    /* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE           0x00000400UL    /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON             0x00000800UL    /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN      0x00002000UL    /* Align objs on cache lines */
#define SLAB_CACHE_DMA          0x00004000UL    /* Use GFP_DMA memory */
#define SLAB_STORE_USER         0x00010000UL    /* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC              0x00040000UL    /* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * but only if we can be sure that the memory has not meanwhile been reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
#define SLAB_DESTROY_BY_RCU     0x00080000UL    /* Defer freeing slabs to RCU */
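
/*
 * Illustrative sketch (not from the kernel source): creating a cache whose
 * slabs are freed by RCU. The names "struct conn" and conn_cachep are
 * hypothetical; lookups must still follow the validation pattern above.
 *
 *  static struct kmem_cache *conn_cachep;
 *
 *  conn_cachep = kmem_cache_create("conn", sizeof(struct conn), 0,
 *                                  SLAB_DESTROY_BY_RCU, NULL);
 *
 *  // Writers free objects normally; only the backing pages are RCU-deferred:
 *  kmem_cache_free(conn_cachep, conn);
 */
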
#define SLAB_MEM_SPREAD         0x00100000UL    /* Spread some memory over cpuset */
#define SLAB_TRACE              0x00200000UL    /* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS     0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS     0x00000000UL
#endif

#define SLAB_NOLEAKTRACE        0x00800000UL    /* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK           0x01000000UL
#else
# define SLAB_NOTRACK           0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB          0x02000000UL    /* Fault injection mark */
#else
# define SLAB_FAILSLAB          0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT    0x00020000UL            /* Objects are reclaimable */
#define SLAB_TEMPORARY          SLAB_RECLAIM_ACCOUNT    /* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
                                (unsigned long)ZERO_SIZE_PTR)
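
/*
 * Illustrative sketch (not part of the header proper): a zero-length request
 * yields ZERO_SIZE_PTR rather than NULL, so a NULL check alone does not mean
 * the allocation failed. ZERO_OR_NULL_PTR() covers both cases, and kfree()
 * accepts either value.
 *
 *  void *p = kmalloc(0, GFP_KERNEL);   // returns ZERO_SIZE_PTR
 *
 *  if (ZERO_OR_NULL_PTR(p))
 *          ;                           // nothing usable was allocated
 *
 *  kfree(p);                           // no-op for ZERO_SIZE_PTR and NULL
 */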

#include <linux/kmemleak.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
                        unsigned long,
                        void (*)(void *));
#ifdef CONFIG_MEMCG_KMEM
void kmem_cache_create_memcg(struct mem_cgroup *, struct kmem_cache *);
#endif
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
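
/*
 * Illustrative sketch of the cache lifecycle (hypothetical names "foo" and
 * foo_cachep; error handling trimmed to the essentials):
 *
 *  static struct kmem_cache *foo_cachep;
 *
 *  foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
 *                                 0, SLAB_HWCACHE_ALIGN, NULL);
 *  if (!foo_cachep)
 *          return -ENOMEM;
 *
 *  struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *  if (!f)
 *          return -ENOMEM;
 *  ...
 *  kmem_cache_free(foo_cachep, f);
 *  kmem_cache_destroy(foo_cachep);    // all objects must have been freed
 */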

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you,
 * e.g., add ____cacheline_aligned_in_smp to the struct declaration,
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
                sizeof(struct __struct), __alignof__(struct __struct),\
                (__flags), NULL)
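
/*
 * Illustrative sketch: the same hypothetical "foo" cache as above, but
 * letting the macro derive the name, size and alignment from the struct
 * definition itself.
 *
 *  struct foo {
 *          int a;
 *          long b;
 *  } ____cacheline_aligned_in_smp;
 *
 *  foo_cachep = KMEM_CACHE(foo, SLAB_PANIC);
 */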

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
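
/*
 * Illustrative sketch of growing a buffer with krealloc(). On failure the
 * original buffer is left untouched, so assign the result to a temporary
 * first (buf and new_len are hypothetical):
 *
 *  void *tmp = krealloc(buf, new_len, GFP_KERNEL);
 *  if (!tmp)
 *          return -ENOMEM;         // buf is still valid and must be freed
 *  buf = tmp;
 */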

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
        unsigned int object_size;/* The original size of the object */
        unsigned int size;      /* The aligned/padded/added on size  */
        unsigned int align;     /* Alignment as calculated */
        unsigned long flags;    /* Active flags on the slab */
        const char *name;       /* Slab name for sysfs */
        int refcount;           /* Use counter */
        void (*ctor)(void *);   /* Called on object slot creation */
        struct list_head list;  /* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH      ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
                                (MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX       KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW       5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH      (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX       (MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW       3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH      PAGE_SHIFT
#define KMALLOC_SHIFT_MAX       30
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW       3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE        (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE  (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER       (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
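
/*
 * Illustrative arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12) and
 * MAX_ORDER = 11: under SLUB, KMALLOC_SHIFT_HIGH = 13, so
 * KMALLOC_MAX_CACHE_SIZE = 8 KiB and anything larger goes straight to the
 * page allocator; KMALLOC_SHIFT_MAX = 23 gives KMALLOC_MAX_SIZE = 8 MiB
 * and KMALLOC_MAX_ORDER = 11.
 */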

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte-sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte-sized index which can represent 2^8 entries, the size of the object
 * should be greater than or equal to 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up on using the byte-sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline int kmalloc_index(size_t size)
{
        if (!size)
                return 0;

        if (size <= KMALLOC_MIN_SIZE)
                return KMALLOC_SHIFT_LOW;

        if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
                return 1;
        if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
                return 2;
        if (size <=          8) return 3;
        if (size <=         16) return 4;
        if (size <=         32) return 5;
        if (size <=         64) return 6;
        if (size <=        128) return 7;
        if (size <=        256) return 8;
        if (size <=        512) return 9;
        if (size <=       1024) return 10;
        if (size <=   2 * 1024) return 11;
        if (size <=   4 * 1024) return 12;
        if (size <=   8 * 1024) return 13;
        if (size <=  16 * 1024) return 14;
        if (size <=  32 * 1024) return 15;
        if (size <=  64 * 1024) return 16;
        if (size <= 128 * 1024) return 17;
        if (size <= 256 * 1024) return 18;
        if (size <= 512 * 1024) return 19;
        if (size <= 1024 * 1024) return 20;
        if (size <=  2 * 1024 * 1024) return 21;
        if (size <=  4 * 1024 * 1024) return 22;
        if (size <=  8 * 1024 * 1024) return 23;
        if (size <=  16 * 1024 * 1024) return 24;
        if (size <=  32 * 1024 * 1024) return 25;
        if (size <=  64 * 1024 * 1024) return 26;
        BUG();

        /* Will never be reached. Needed because the compiler may complain */
        return -1;
}
#endif /* !CONFIG_SLOB */
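
/*
 * Illustrative sketch: kmalloc_index() is intended for compile-time constant
 * sizes (the callers below check __builtin_constant_p()). Assuming
 * KMALLOC_MIN_SIZE is 8:
 *
 *  kmalloc_index(8)   == 3   // served by the 8-byte cache
 *  kmalloc_index(100) == 7   // rounded up to the 128-byte cache
 *  kmalloc_index(192) == 2   // the special 192-byte cache
 *
 * The result indexes kmalloc_caches[] (or kmalloc_dma_caches[] for GFP_DMA).
 */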

void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
        return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
        return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                           gfp_t gfpflags,
                                           int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
                              gfp_t gfpflags,
                              int node, size_t size)
{
        return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
                gfp_t flags, size_t size)
{
        return kmem_cache_alloc(s, flags);
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
                              gfp_t gfpflags,
                              int node, size_t size)
{
        return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif /* CONFIG_TRACING */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
        void *ret;

        flags |= (__GFP_COMP | __GFP_KMEMCG);
        ret = (void *) __get_free_pages(flags, order);
        kmemleak_alloc(ret, size, 1, flags);
        return ret;
}

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
        return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
        unsigned int order = get_order(size);
        return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel RAM.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
        if (__builtin_constant_p(size)) {
                if (size > KMALLOC_MAX_CACHE_SIZE)
                        return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
                if (!(flags & GFP_DMA)) {
                        int index = kmalloc_index(size);

                        if (!index)
                                return ZERO_SIZE_PTR;

                        return kmem_cache_alloc_trace(kmalloc_caches[index],
                                        flags, size);
                }
#endif
        }
        return __kmalloc(size, flags);
}
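
/*
 * Illustrative sketch of the common allocation pattern (hypothetical
 * struct foo; sleeping context assumed, hence GFP_KERNEL):
 *
 *  struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *  if (!f)
 *          return -ENOMEM;
 *  ...
 *  kfree(f);
 *
 * In atomic context (e.g. an interrupt handler) GFP_ATOMIC would be used
 * instead, accepting a higher chance of failure.
 */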

/*
 * Determine the size used for the nth kmalloc cache.
 * Return the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
        if (n > 2)
                return 1 << n;

        if (n == 1 && KMALLOC_MIN_SIZE <= 32)
                return 96;

        if (n == 2 && KMALLOC_MIN_SIZE <= 64)
                return 192;
#endif
        return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
        if (__builtin_constant_p(size) &&
                size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
                int i = kmalloc_index(size);

                if (!i)
                        return ZERO_SIZE_PTR;

                return kmem_cache_alloc_node_trace(kmalloc_caches[i],
                                                flags, node, size);
        }
#endif
        return __kmalloc_node(size, flags, node);
}
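
/*
 * Illustrative sketch: allocating data close to the CPU that will use it
 * (cpu is hypothetical; cpu_to_node() comes from linux/topology.h):
 *
 *  int node = cpu_to_node(cpu);
 *  struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL, node);
 *  if (!f)
 *          return -ENOMEM;
 */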

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * struct kmem_cache will hold a pointer to it, so the memory cost while
 * disabled is one pointer. The runtime cost while enabled is larger than it
 * would be if this were bundled into kmem_cache, because we need an extra
 * pointer chase. But the trade-off clearly lies in favor of not penalizing
 * non-users.
 *
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @list: list_head for the list of all caches in this memcg
 * @root_cache: pointer to the global, root cache this cache was derived from
 * @dead: set to true after the memcg dies; the cache may still be around.
 * @nr_pages: number of pages that belong to this cache.
 * @destroy: worker to be called whenever we are ready, or believe we may be
 *           ready, to destroy this cache.
 */
struct memcg_cache_params {
        bool is_root_cache;
        union {
                struct {
                        struct rcu_head rcu_head;
                        struct kmem_cache *memcg_caches[0];
                };
                struct {
                        struct mem_cgroup *memcg;
                        struct list_head list;
                        struct kmem_cache *root_cache;
                        bool dead;
                        atomic_t nr_pages;
                        struct work_struct destroy;
                };
        };
};

int memcg_update_all_caches(int num_memcgs);

struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
        if (size != 0 && n > SIZE_MAX / size)
                return NULL;
        return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
        return kmalloc_array(n, size, flags | __GFP_ZERO);
}
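
/*
 * Illustrative sketch: kmalloc_array()/kcalloc() guard the n * size
 * multiplication against overflow, so prefer them over an open-coded
 * kmalloc(n * size, ...) (nr_entries is hypothetical):
 *
 *  struct foo *table = kcalloc(nr_entries, sizeof(*table), GFP_KERNEL);
 *  if (!table)
 *          return -ENOMEM;
 *  ...
 *  kfree(table);
 */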

/*
 * kmalloc_track_caller is a special version of kmalloc that records, for
 * slab leak tracking, the caller of the routine that calls it rather than
 * that routine itself (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
        (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
        (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
        __kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
        __kmalloc(size, flags)
#endif /* DEBUG_SLAB */
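
/*
 * Illustrative sketch of the intended use: a widely-used helper allocator
 * (my_dup_buf is hypothetical) whose own name would be useless in leak
 * reports, so the allocation is attributed to its caller instead:
 *
 *  void *my_dup_buf(const void *src, size_t len, gfp_t gfp)
 *  {
 *          void *p = kmalloc_track_caller(len, gfp);
 *
 *          if (p)
 *                  memcpy(p, src, len);
 *          return p;
 *  }
 *
 * This mirrors what helpers like kmemdup() do.
 */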

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records, for slab leak tracking, the caller of the routine that calls it
 * rather than that routine itself (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
        (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
        (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
        __kmalloc_node_track_caller(size, flags, node, \
                        _RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
        __kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
        kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
        return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
        return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
        return kmalloc_node(size, flags | __GFP_ZERO, node);
}
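
/*
 * Illustrative sketch: the zeroing shortcuts are simply the corresponding
 * allocators with __GFP_ZERO OR'ed in, so this:
 *
 *  struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 * is equivalent to:
 *
 *  struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL | __GFP_ZERO);
 */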

/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
        return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif  /* _LINUX_SLAB_H */