/* linux/include/linux/slab_def.h */
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * when kmalloc() is called with a size that can be established at
 * compile time.
 */
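
/*
 * Illustrative sketch (not part of this header): because the size below
 * is a compile-time constant, the kmalloc() inline defined further down
 * folds the whole size-class search away and the call reduces to a
 * single kmem_cache_alloc() from the matching general cache:
 *
 *      char *buf = kmalloc(64, GFP_KERNEL);    (size-64 general cache)
 *      ...
 *      kfree(buf);
 *
 * A size that is not a compile-time constant takes the out-of-line
 * __kmalloc() path instead.
 */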

#include <linux/init.h>
#include <asm/page.h>           /* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>          /* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

#include <trace/events/kmem.h>

/*
 * struct kmem_cache
 *
 * Manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
        unsigned int batchcount;
        unsigned int limit;
        unsigned int shared;

        unsigned int buffer_size;
        u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

        unsigned int flags;             /* constant flags */
        unsigned int num;               /* # of objs per slab */

/* 3) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;

        /* force GFP flags, e.g. GFP_DMA */
        gfp_t gfpflags;

        size_t colour;                  /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
        struct kmem_cache *slabp_cache; /* cache for off-slab slab management */
        unsigned int slab_size;
        unsigned int dflags;            /* dynamic flags */

        /* constructor func */
        void (*ctor)(void *obj);

/* 4) cache creation/removal */
        const char *name;
        struct list_head next;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
        atomic_t freemiss;

        /*
         * If debugging is enabled, then the allocator can add additional
         * fields and/or padding to every object. buffer_size contains the
         * total object size including these internal fields; the following
         * two variables contain the offset to the user object and its size.
         */
        int obj_offset;
        int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

/* 6) per-cpu/per-node data, touched during every alloc/free */
        /*
         * We put array[] at the end of kmem_cache, because we want to size
         * this array to nr_cpu_ids slots instead of NR_CPUS
         * (see kmem_cache_init()).
         * We still use [NR_CPUS] and not [1] or [0] because cache_cache
         * is statically defined, so we reserve the max number of cpus.
         */
        struct kmem_list3 **nodelists;  /* per-node slab lists */
        struct array_cache *array[NR_CPUS];
        /*
         * Do not add fields after array[].
         */
};
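
/*
 * A note on reciprocal_buffer_size: on every free, mm/slab.c needs the
 * index of an object within its slab, i.e. offset / buffer_size.
 * Caching reciprocal_value(buffer_size) lets that division be done as
 * a multiply-and-shift. A sketch of the idea (the real helper lives in
 * mm/slab.c, built on <linux/reciprocal_div.h>):
 *
 *      u32 offset = obj - slab_base;
 *      unsigned int idx = reciprocal_divide(offset,
 *                                           cachep->reciprocal_buffer_size);
 *
 * idx then equals offset / cachep->buffer_size without a divide
 * instruction on the hot path.
 */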

/* Size description struct for general caches. */
struct cache_sizes {
        size_t                  cs_size;
        struct kmem_cache       *cs_cachep;
#ifdef CONFIG_ZONE_DMA
        struct kmem_cache       *cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
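
/*
 * malloc_sizes[] is built in mm/slab.c by expanding the same
 * <linux/kmalloc_sizes.h> list that kmalloc() walks below, roughly:
 *
 *      struct cache_sizes malloc_sizes[] = {
 *      #define CACHE(x) { .cs_size = (x) },
 *      #include <linux/kmalloc_sizes.h>
 *              CACHE(ULONG_MAX)
 *      #undef CACHE
 *      };
 *
 * so index i corresponds to the i-th CACHE() entry in that list.
 */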

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

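/*
 * With CONFIG_TRACING the _trace variant is a real function in
 * mm/slab.c, so trace events can record the size the caller actually
 * asked for (which may be smaller than the cache's object size).
 * Without it, the variant collapses back into plain kmem_cache_alloc().
 */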
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(size_t size,
                                    struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
{
        return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
        return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
        struct kmem_cache *cachep;
        void *ret;

        if (__builtin_constant_p(size)) {
                int i = 0;

                if (!size)
                        return ZERO_SIZE_PTR;

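                /*
                 * Expanding <linux/kmalloc_sizes.h> with this CACHE()
                 * definition unrolls into an if/else chain over every
                 * general cache size; i counts the sizes that were too
                 * small, so at "found:" it indexes malloc_sizes[]. With
                 * a constant size the chain folds away at compile time.
                 */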
#define CACHE(x) \
                if (size <= x) \
                        goto found; \
                else \
                        i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
                return NULL;
found:
#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        cachep = malloc_sizes[i].cs_dmacachep;
                else
#endif
                        cachep = malloc_sizes[i].cs_cachep;

                ret = kmem_cache_alloc_trace(size, cachep, flags);

                return ret;
        }
        return __kmalloc(size, flags);
}
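
/*
 * Example (illustrative, not part of this header): a typical caller
 * passes a constant sizeof(), so the inline fast path above applies.
 * "struct foo" is a stand-in for any caller-defined type:
 *
 *      struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *      if (!p)
 *              return -ENOMEM;
 */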

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(size_t size,
                                         struct kmem_cache *cachep,
                                         gfp_t flags,
                                         int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(size_t size,
                            struct kmem_cache *cachep,
                            gfp_t flags,
                            int nodeid)
{
        return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
        struct kmem_cache *cachep;

        if (__builtin_constant_p(size)) {
                int i = 0;

                if (!size)
                        return ZERO_SIZE_PTR;
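
                /* Same compile-time size-class walk as in kmalloc() above. */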
#define CACHE(x) \
                if (size <= x) \
                        goto found; \
                else \
                        i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
                return NULL;
found:
#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        cachep = malloc_sizes[i].cs_dmacachep;
                else
#endif
                        cachep = malloc_sizes[i].cs_cachep;

                return kmem_cache_alloc_node_trace(size, cachep, flags, node);
        }
        return __kmalloc_node(size, flags, node);
}
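
/*
 * Example (illustrative): allocate memory close to a device's NUMA
 * node; "dev" is a stand-in for any struct device pointer:
 *
 *      buf = kmalloc_node(sizeof(*buf), GFP_KERNEL, dev_to_node(dev));
 *
 * Passing -1 (NUMA_NO_NODE) as the node means no placement preference.
 */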

#endif  /* CONFIG_NUMA */

#endif  /* _LINUX_SLAB_DEF_H */