linux/include/linux/slab_def.h
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
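
/*
 * For example (illustrative only): a call such as
 *
 *      buf = kmalloc(64, GFP_KERNEL);
 *
 * passes a compile-time constant size, so the inline kmalloc() below
 * maps it straight to kmalloc_caches[kmalloc_index(64)] and allocates
 * from that cache, avoiding the run-time size lookup in __kmalloc().
 */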

#include <linux/init.h>
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by slab_mutex */
        unsigned int batchcount;
        unsigned int limit;
        unsigned int shared;

        unsigned int size;
        u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

        unsigned int flags;             /* constant flags */
        unsigned int num;               /* # of objs per slab */

/* 3) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;

        /* force GFP flags, e.g. GFP_DMA */
        gfp_t allocflags;

        size_t colour;                  /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
        struct kmem_cache *slabp_cache;
        unsigned int slab_size;

        /* constructor func */
        void (*ctor)(void *obj);

/* 4) cache creation/removal */
        const char *name;
        struct list_head list;
        int refcount;
        int object_size;
        int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
        atomic_t freemiss;

        /*
         * If debugging is enabled, then the allocator can add additional
         * fields and/or padding to every object.  size contains the total
         * object size including these internal fields; obj_offset below
         * holds the offset to the user-visible object, whose unpadded
         * size is kept in object_size above.
         */
        int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
        struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
        /*
         * We put array[] at the end of kmem_cache, because we want to size
         * this array to nr_cpu_ids slots instead of NR_CPUS
         * (see kmem_cache_init())
         * We still use [NR_CPUS] and not [1] or [0] because cache_cache
         * is statically defined, so we reserve the max number of cpus.
         *
         * We also need to guarantee that the array is able to accommodate
         * a pointer for each node, since the per-node "node" pointers use
         * the remainder of the available slots.
         */
        struct kmem_cache_node **node;
        struct array_cache *array[NR_CPUS + MAX_NUMNODES];
        /*
         * Do not add fields after array[]
         */
};
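
/*
 * Illustrative sketch (not part of this header): a typical client
 * creates a dedicated cache once and then allocates objects from it.
 * The names "my_cache" and "struct my_obj" are hypothetical.
 *
 *      struct kmem_cache *my_cache;
 *
 *      my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
 *                                   0, SLAB_HWCACHE_ALIGN, NULL);
 *      obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *      ...
 *      kmem_cache_free(my_cache, obj);
 */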

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

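/*
 * With CONFIG_TRACING, kmem_cache_alloc_trace() additionally records
 * the originally requested size for the kmalloc tracepoints; without
 * it, the stub below degenerates to a plain kmem_cache_alloc().
 */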
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
        return kmem_cache_alloc(cachep, flags);
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
        struct kmem_cache *cachep;
        void *ret;

        if (__builtin_constant_p(size)) {
                int i;

                if (!size)
                        return ZERO_SIZE_PTR;

                if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
                        return NULL;

                /* Map the constant size to a general cache index. */
                i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        cachep = kmalloc_dma_caches[i];
                else
#endif
                        cachep = kmalloc_caches[i];

                ret = kmem_cache_alloc_trace(cachep, flags, size);

                return ret;
        }
        /* Size not known at compile time: resolve the cache at run time. */
        return __kmalloc(size, flags);
}
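
/*
 * Illustrative contrast (the variable "len" is hypothetical): the two
 * calls below take different paths through kmalloc() above.
 *
 *      p = kmalloc(128, GFP_KERNEL);   constant size: cache chosen at
 *                                      compile time via kmalloc_index()
 *      q = kmalloc(len, GFP_KERNEL);   run-time size: falls back to
 *                                      __kmalloc()
 */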

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
                                         gfp_t flags,
                                         int nodeid,
                                         size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
                            gfp_t flags,
                            int nodeid,
                            size_t size)
{
        return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
        struct kmem_cache *cachep;

        if (__builtin_constant_p(size)) {
                int i;

                if (!size)
                        return ZERO_SIZE_PTR;

                if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
                        return NULL;

                /* Map the constant size to a general cache index. */
                i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        cachep = kmalloc_dma_caches[i];
                else
#endif
                        cachep = kmalloc_caches[i];

                return kmem_cache_alloc_node_trace(cachep, flags, node, size);
        }
        /* Size not known at compile time: resolve the cache at run time. */
        return __kmalloc_node(size, flags, node);
}
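
/*
 * Illustrative only (the names "data" and "nid" are hypothetical):
 * allocating memory close to a specific NUMA node:
 *
 *      data = kmalloc_node(sizeof(*data), GFP_KERNEL, nid);
 *
 * With a constant sizeof() the cache is again picked at compile time;
 * only the node-aware allocation path differs from plain kmalloc().
 */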

#endif  /* CONFIG_NUMA */

#endif  /* _LINUX_SLAB_DEF_H */