linux/include/linux/slab.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *      Cleaned up and restructured to ease the addition of alternative
 *      implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE           ((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON             ((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN      ((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA          ((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32        ((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER         ((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC              ((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not meanwhile been reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU    ((slab_flags_t __force)0x00080000U)
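/*
 * Illustrative sketch (the struct, cache variable, lookup and put helpers
 * are hypothetical): objects freed into such a cache stay type-stable for
 * the grace period, so a lockless lookup revalidates with a refcount and
 * a key re-check, as the comment above describes.
 *
 *  struct foo {
 *      refcount_t ref;
 *      unsigned long key;
 *  };
 *
 *  foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *                                 SLAB_TYPESAFE_BY_RCU, NULL);
 *
 *  rcu_read_lock();
 *  obj = foo_lookup(key);                      // hypothetical lookup
 *  if (obj && refcount_inc_not_zero(&obj->ref)) {
 *      if (obj->key != key) {                  // recycled for another key
 *          foo_put(obj);                       // hypothetical ref drop
 *          obj = NULL;
 *      }
 *  } else {
 *      obj = NULL;                             // object was freed
 *  }
 *  rcu_read_unlock();
 */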
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD         ((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE              ((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS     ((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS     0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE        ((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB          ((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB          0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT           ((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT           0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN              ((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN              0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT    ((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY          SLAB_RECLAIM_ACCOUNT    /* Objects are short-lived */

/* Slab deactivation flag */
#define SLAB_DEACTIVATED        ((slab_flags_t __force)0x10000000U)

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
                                (unsigned long)ZERO_SIZE_PTR)
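/*
 * Illustrative sketch: a zero-length kmalloc() returns ZERO_SIZE_PTR,
 * which must never be dereferenced; ZERO_OR_NULL_PTR() catches both it
 * and NULL when a caller may legitimately request zero bytes.
 *
 *  void *p = kmalloc(len, GFP_KERNEL);   // len may be 0
 *
 *  if (ZERO_OR_NULL_PTR(p))
 *      return NULL;                      // nothing usable was allocated
 *  ...
 *  kfree(p);                             // no-op for NULL and ZERO_SIZE_PTR
 */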

#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
                        unsigned int align, slab_flags_t flags,
                        void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
                        unsigned int size, unsigned int align,
                        slab_flags_t flags,
                        unsigned int useroffset, unsigned int usersize,
                        void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)                                   \
                kmem_cache_create(#__struct, sizeof(struct __struct),   \
                        __alignof__(struct __struct), (__flags), NULL)
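/*
 * Usage sketch (the struct and cache variable are hypothetical): the
 * cache inherits the struct's own alignment, so annotations on the
 * struct declaration carry over to the objects.
 *
 *  struct foo_ctx {
 *      spinlock_t lock;
 *      unsigned long state;
 *  } ____cacheline_aligned_in_smp;
 *
 *  static struct kmem_cache *foo_ctx_cachep;
 *
 *  static int __init foo_init(void)
 *  {
 *      foo_ctx_cachep = KMEM_CACHE(foo_ctx, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *      return 0;
 *  }
 */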

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)                 \
                kmem_cache_create_usercopy(#__struct,                   \
                        sizeof(struct __struct),                        \
                        __alignof__(struct __struct), (__flags),        \
                        offsetof(struct __struct, __field),             \
                        sizeof_field(struct __struct, __field), NULL)
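/*
 * Usage sketch (hypothetical struct and cache variable): with hardened
 * usercopy enabled, only the data field of each object may be copied
 * to/from user space; copies overlapping the list_head are rejected.
 *
 *  struct foo_req {
 *      struct list_head list;
 *      char data[64];
 *  };
 *
 *  foo_req_cachep = KMEM_CACHE_USERCOPY(foo_req, SLAB_HWCACHE_ALIGN, data);
 */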

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kfree_sensitive(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
                        bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
                                       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH      ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
                                (MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX       KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW       5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH      (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX       (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW       3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH      PAGE_SHIFT
#define KMALLOC_SHIFT_MAX       (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW       3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE        (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE  (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER       (KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte-sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte-sized index which can represent 2^8 entries, the size of the
 * object should be greater than or equal to 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the
 * minimum object size and give up on using a byte-sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 */
enum kmalloc_cache_type {
        KMALLOC_NORMAL = 0,
        KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
        KMALLOC_DMA,
#endif
        NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
        /*
         * The most common case is KMALLOC_NORMAL, so test for it
         * with a single branch for both flags.
         */
        if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
                return KMALLOC_NORMAL;

        /*
         * At least one of the flags has to be set. If both are, __GFP_DMA
         * is more important.
         */
        return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
        return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
        if (!size)
                return 0;

        if (size <= KMALLOC_MIN_SIZE)
                return KMALLOC_SHIFT_LOW;

        if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
                return 1;
        if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
                return 2;
        if (size <=          8) return 3;
        if (size <=         16) return 4;
        if (size <=         32) return 5;
        if (size <=         64) return 6;
        if (size <=        128) return 7;
        if (size <=        256) return 8;
        if (size <=        512) return 9;
        if (size <=       1024) return 10;
        if (size <=   2 * 1024) return 11;
        if (size <=   4 * 1024) return 12;
        if (size <=   8 * 1024) return 13;
        if (size <=  16 * 1024) return 14;
        if (size <=  32 * 1024) return 15;
        if (size <=  64 * 1024) return 16;
        if (size <= 128 * 1024) return 17;
        if (size <= 256 * 1024) return 18;
        if (size <= 512 * 1024) return 19;
        if (size <= 1024 * 1024) return 20;
        if (size <=  2 * 1024 * 1024) return 21;
        if (size <=  4 * 1024 * 1024) return 22;
        if (size <=  8 * 1024 * 1024) return 23;
        if (size <=  16 * 1024 * 1024) return 24;
        if (size <=  32 * 1024 * 1024) return 25;
        if (size <=  64 * 1024 * 1024) return 26;
        BUG();

        /* Will never be reached. Needed because the compiler may complain */
        return -1;
}
#endif /* !CONFIG_SLOB */
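/*
 * Illustrative values for kmalloc_index() (assuming KMALLOC_MIN_SIZE <= 32):
 *
 *  kmalloc_index(96)  == 1    // fits the dedicated 96-byte cache
 *  kmalloc_index(100) == 7    // just misses it; rounds up to 128
 *  kmalloc_index(130) == 2    // fits the dedicated 192-byte cache
 */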

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
        kmem_cache_free_bulk(NULL, size, p);
}
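/*
 * Usage sketch (hypothetical cache variable): allocate and free a batch
 * with one call each; kmem_cache_alloc_bulk() returns the number of
 * objects allocated, or 0 on failure. Interrupts must be enabled.
 *
 *  void *objs[16];
 *
 *  if (!kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *      return -ENOMEM;
 *  ...
 *  kmem_cache_free_bulk(foo_cachep, ARRAY_SIZE(objs), objs);
 */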

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
        return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
        return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                           gfp_t gfpflags,
                                           int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
                              gfp_t gfpflags,
                              int node, size_t size)
{
        return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
                gfp_t flags, size_t size)
{
        void *ret = kmem_cache_alloc(s, flags);

        ret = kasan_kmalloc(s, ret, size, flags);
        return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
                              gfp_t gfpflags,
                              int node, size_t size)
{
        void *ret = kmem_cache_alloc_node(s, gfpflags, node);

        ret = kasan_kmalloc(s, ret, size, gfpflags);
        return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
        return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
        unsigned int order = get_order(size);
        return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For a @size that is a power of two, the alignment is also
 * guaranteed to be at least @size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *      Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *      Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *      Allocation will not sleep.  May use emergency pools.
 *
 * %GFP_HIGHUSER
 *      Allocate memory from high memory on behalf of user.
 *
 * It is also possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *      This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *      Indicate that this allocation is in no way allowed to fail
 *      (think twice before using).
 *
 * %__GFP_NORETRY
 *      If memory is not immediately available,
 *      then give up at once.
 *
 * %__GFP_NOWARN
 *      If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *      Try really hard to satisfy the allocation, but eventually
 *      give up and fail.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
        if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
                unsigned int index;
#endif
                if (size > KMALLOC_MAX_CACHE_SIZE)
                        return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
                index = kmalloc_index(size);

                if (!index)
                        return ZERO_SIZE_PTR;

                return kmem_cache_alloc_trace(
                                kmalloc_caches[kmalloc_type(flags)][index],
                                flags, size);
#endif
        }
        return __kmalloc(size, flags);
}
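/*
 * Usage sketch (hypothetical struct): the common pattern for a sleeping
 * allocation with a failure check.
 *
 *  struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *  if (!f)
 *      return -ENOMEM;
 */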

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
        if (__builtin_constant_p(size) &&
                size <= KMALLOC_MAX_CACHE_SIZE) {
                unsigned int i = kmalloc_index(size);

                if (!i)
                        return ZERO_SIZE_PTR;

                return kmem_cache_alloc_node_trace(
                                kmalloc_caches[kmalloc_type(flags)][i],
                                                flags, node, size);
        }
#endif
        return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
        size_t bytes;

        if (unlikely(check_mul_overflow(n, size, &bytes)))
                return NULL;
        if (__builtin_constant_p(n) && __builtin_constant_p(size))
                return kmalloc(bytes, flags);
        return __kmalloc(bytes, flags);
}
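/*
 * Usage sketch (hypothetical element type): unlike open-coded
 * kmalloc(n * size, ...), the multiplication above is overflow-checked,
 * so a huge @n fails cleanly instead of wrapping into a short buffer.
 *
 *  struct foo_entry *entries;
 *
 *  entries = kmalloc_array(nr_entries, sizeof(*entries), GFP_KERNEL);
 *  if (!entries)
 *      return -ENOMEM;
 */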

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
        return kmalloc_array(n, size, flags | __GFP_ZERO);
}

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
        __kmalloc_track_caller(size, flags, _RET_IP_)
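/*
 * Usage sketch (hypothetical wrapper): because _RET_IP_ is evaluated
 * inside the wrapper, leak reports point at foo_alloc()'s caller rather
 * than at foo_alloc() itself.
 *
 *  static void *foo_alloc(size_t len)
 *  {
 *      return kmalloc_track_caller(len, GFP_KERNEL);
 *  }
 */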

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
                                       int node)
{
        size_t bytes;

        if (unlikely(check_mul_overflow(n, size, &bytes)))
                return NULL;
        if (__builtin_constant_p(n) && __builtin_constant_p(size))
                return kmalloc_node(bytes, flags, node);
        return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
        return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
        __kmalloc_node_track_caller(size, flags, node, \
                        _RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
        kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
        return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
        return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
        return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu        NULL
#define slab_dead_cpu           NULL
#endif

#endif  /* _LINUX_SLAB_H */