linux/include/linux/slab_def.h
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
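
/*
 * For example, a call with a constant size such as
 *
 *        ptr = kmalloc(100, GFP_KERNEL);
 *
 * is resolved by the kmalloc() inline below to the smallest general
 * cache that fits (the 128-byte cache on common configurations) and
 * goes straight to kmem_cache_alloc_notrace(), skipping the run-time
 * size lookup in __kmalloc().
 */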

#include <linux/init.h>
#include <asm/page.h>           /* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>          /* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
#include <linux/kmemtrace.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
        struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
        unsigned int batchcount;        /* # of objects to move at a time
                                           between the per-cpu array and
                                           the shared array or slabs */
        unsigned int limit;             /* max # of free objects held in
                                           the per-cpu array */
        unsigned int shared;            /* sizing factor for the per-node
                                           shared array cache */

        unsigned int buffer_size;       /* object size incl. alignment and
                                           debug padding */
        u32 reciprocal_buffer_size;     /* reciprocal_value(buffer_size),
                                           for fast object-index math */
/* 3) touched by every alloc & free from the backend */

        unsigned int flags;             /* constant flags */
        unsigned int num;               /* # of objs per slab */

/* 4) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;

        /* force GFP flags, e.g. GFP_DMA */
        gfp_t gfpflags;

        size_t colour;                  /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
        struct kmem_cache *slabp_cache; /* cache for off-slab slab
                                           management structures */
        unsigned int slab_size;         /* size of the slab management
                                           data */
        unsigned int dflags;            /* dynamic flags */

        /* constructor func */
        void (*ctor)(void *obj);

/* 5) cache creation/removal */
        const char *name;
        struct list_head next;

/* 6) statistics */
#ifdef CONFIG_DEBUG_SLAB
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
        atomic_t allochit;              /* allocs satisfied straight from
                                           the per-cpu array */
        atomic_t allocmiss;             /* allocs that had to refill the
                                           per-cpu array */
        atomic_t freehit;               /* frees absorbed by the per-cpu
                                           array */
        atomic_t freemiss;              /* frees that forced a flush of
                                           the per-cpu array */

        /*
         * If debugging is enabled, then the allocator can add additional
         * fields and/or padding to every object. buffer_size contains the
         * total object size including these internal fields; the following
         * two variables contain the offset to the user object and its size.
         */
        int obj_offset;
        int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

        /*
         * We put nodelists[] at the end of kmem_cache, because we want to
         * size this array to nr_node_ids slots instead of MAX_NUMNODES
         * (see kmem_cache_init()).
         * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
         * is statically defined, so we reserve the max number of nodes.
         */
        struct kmem_list3 *nodelists[MAX_NUMNODES];
        /*
         * Do not add fields after nodelists[]
         */
};

/* Size description struct for general caches. */
struct cache_sizes {
        size_t                  cs_size;
        struct kmem_cache       *cs_cachep;
#ifdef CONFIG_ZONE_DMA
        struct kmem_cache       *cs_dmacachep;
#endif
};
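/*
 * malloc_sizes[] (defined in mm/slab.c) has one entry per CACHE(x) line
 * in <linux/kmalloc_sizes.h>, in ascending size order; the kmalloc()
 * inlines below index it with the position found by the CACHE(x)
 * cascade.
 */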
extern struct cache_sizes malloc_sizes[];

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

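/*
 * Without CONFIG_KMEMTRACE the _notrace variants collapse into the
 * plain allocation calls, and slab_buffer_size() returns a dummy 0;
 * its value is only ever passed to the trace hooks.
 */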
#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
        return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
        return 0;
}
#endif

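/*
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required
 * @flags: the type of memory to allocate (e.g. GFP_KERNEL, GFP_ATOMIC)
 *
 * If @size is known at compile time, the matching general cache is
 * selected here and the allocation goes straight to
 * kmem_cache_alloc_notrace(); otherwise we fall back to __kmalloc(),
 * which performs the size lookup at run time.
 */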
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
        struct kmem_cache *cachep;
        void *ret;

        if (__builtin_constant_p(size)) {
                int i = 0;

                if (!size)
                        return ZERO_SIZE_PTR;

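                /*
                 * <linux/kmalloc_sizes.h> expands CACHE(x) once per
                 * general cache size (the exact list depends on
                 * PAGE_SIZE and L1_CACHE_BYTES), so the include below
                 * unrolls into a chain of comparisons that leaves the
                 * index of the first cache with cs_size >= size in i.
                 * With a constant size, the chain folds at compile time.
                 */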
#define CACHE(x) \
                if (size <= x) \
                        goto found; \
                else \
                        i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
                return NULL;
found:
#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        cachep = malloc_sizes[i].cs_dmacachep;
                else
#endif
                        cachep = malloc_sizes[i].cs_cachep;

                ret = kmem_cache_alloc_notrace(cachep, flags);

                trace_kmalloc(_THIS_IP_, ret,
                              size, slab_buffer_size(cachep), flags);

                return ret;
        }
        return __kmalloc(size, flags);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
                                           gfp_t flags,
                                           int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
                              gfp_t flags,
                              int nodeid)
{
        return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

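/*
 * kmalloc_node - allocate memory from a given NUMA node
 *
 * Same compile-time cache selection as kmalloc() above, but the object
 * is allocated from @node via kmem_cache_alloc_node().
 */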
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
        struct kmem_cache *cachep;
        void *ret;

        if (__builtin_constant_p(size)) {
                int i = 0;

                if (!size)
                        return ZERO_SIZE_PTR;

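                /* Same cache-index cascade as in kmalloc() above. */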
#define CACHE(x) \
                if (size <= x) \
                        goto found; \
                else \
                        i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
                return NULL;
found:
#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        cachep = malloc_sizes[i].cs_dmacachep;
                else
#endif
                        cachep = malloc_sizes[i].cs_cachep;

                ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

                trace_kmalloc_node(_THIS_IP_, ret,
                                   size, slab_buffer_size(cachep),
                                   flags, node);

                return ret;
        }
        return __kmalloc_node(size, flags, node);
}

#endif  /* CONFIG_NUMA */

#endif  /* _LINUX_SLAB_DEF_H */