linux/include/linux/slub_def.h
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

#include <linux/kmemleak.h>

enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
        ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
        FREE_FASTPATH,          /* Free to cpu slab */
        FREE_SLOWPATH,          /* Freeing not to cpu slab */
        FREE_FROZEN,            /* Freeing to frozen slab */
        FREE_ADD_PARTIAL,       /* Freeing moves slab to partial list */
        FREE_REMOVE_PARTIAL,    /* Freeing removes last object */
        ALLOC_FROM_PARTIAL,     /* Cpu slab acquired from node partial list */
        ALLOC_SLAB,             /* Cpu slab acquired from page allocator */
        ALLOC_REFILL,           /* Refill cpu slab from slab freelist */
        ALLOC_NODE_MISMATCH,    /* Switching cpu slab */
        FREE_SLAB,              /* Slab freed to the page allocator */
        CPUSLAB_FLUSH,          /* Abandoning of the cpu slab */
        DEACTIVATE_FULL,        /* Cpu slab was full when deactivated */
        DEACTIVATE_EMPTY,       /* Cpu slab was empty when deactivated */
        DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
        DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
        DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
        DEACTIVATE_BYPASS,      /* Implicit deactivation */
        ORDER_FALLBACK,         /* Number of times fallback was necessary */
        CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
        CMPXCHG_DOUBLE_FAIL,    /* Number of times that cmpxchg double did not match */
        CPU_PARTIAL_ALLOC,      /* Used cpu partial on alloc */
        CPU_PARTIAL_FREE,       /* Refill cpu partial on free */
        CPU_PARTIAL_NODE,       /* Refill cpu partial from node partial */
        CPU_PARTIAL_DRAIN,      /* Drain cpu partial to node partial */
        NR_SLUB_STAT_ITEMS };

struct kmem_cache_cpu {
        void **freelist;        /* Pointer to next available object */
        unsigned long tid;      /* Globally unique transaction id */
        struct page *page;      /* The slab from which we are allocating */
        struct page *partial;   /* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
        unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
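
/*
 * Editorial sketch, not part of the original header: with CONFIG_SLUB_STATS
 * enabled, mm/slub.c bumps the stat_item counters above through a small
 * per-cpu helper roughly like the one below, and exposes the per-cache
 * totals via sysfs under /sys/kernel/slab/<cache>/.
 */
#if 0 /* illustrative only */
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
        /* Non-atomic per-cpu increment; these counters are statistics only. */
        __this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
#endif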

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
        unsigned long x;
};
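
/*
 * Editorial sketch, not part of the original header: mm/slub.c packs the
 * slab page order into the upper bits of x and the object count into the
 * lower bits.  The helpers below follow that scheme (the names and
 * OO_SHIFT/OO_MASK mirror mm/slub.c; oo_make() there derives the object
 * count itself, while this simplified form takes it as an argument).
 */
#if 0 /* illustrative only */
#define OO_SHIFT        16
#define OO_MASK         ((1 << OO_SHIFT) - 1)

static inline struct kmem_cache_order_objects oo_make(int order,
                                                      unsigned long objects)
{
        struct kmem_cache_order_objects x = {
                (order << OO_SHIFT) + objects
        };

        return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
        return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
        return x.x & OO_MASK;
}
#endif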

/*
 * Slab cache management.
 */
struct kmem_cache {
        struct kmem_cache_cpu __percpu *cpu_slab;
        /* Used for retrieving partial slabs, etc. */
        unsigned long flags;
        unsigned long min_partial;
        int size;               /* The size of an object including meta data */
        int object_size;        /* The size of an object without meta data */
        int offset;             /* Free pointer offset. */
        int cpu_partial;        /* Number of per cpu partial objects to keep around */
        struct kmem_cache_order_objects oo;

        /* Allocation and freeing of slabs */
        struct kmem_cache_order_objects max;
        struct kmem_cache_order_objects min;
        gfp_t allocflags;       /* gfp flags to use on each alloc */
        int refcount;           /* Refcount for slab cache destroy */
        void (*ctor)(void *);
        int inuse;              /* Offset to metadata */
        int align;              /* Alignment */
        int reserved;           /* Reserved bytes at the end of slabs */
        const char *name;       /* Name (only for display!) */
        struct list_head list;  /* List of slab caches */
#ifdef CONFIG_SYSFS
        struct kobject kobj;    /* For sysfs */
#endif
#ifdef CONFIG_MEMCG_KMEM
        struct memcg_cache_params *memcg_params;
        int max_attr_size; /* for propagation, maximum size of a stored attr */
#endif

#ifdef CONFIG_NUMA
        /*
         * Defragmentation by allocating from a remote node.
         */
        int remote_node_defrag_ratio;
#endif
        struct kmem_cache_node *node[MAX_NUMNODES];
};
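
/*
 * Editorial sketch, not part of the original header: typical use of a slab
 * cache from client code.  The foo_* names are made up for illustration;
 * kmem_cache_create(), kmem_cache_free() and SLAB_HWCACHE_ALIGN come from
 * <linux/slab.h>, which pulls in this header when SLUB is the configured
 * allocator.
 */
#if 0 /* illustrative only */
struct foo {
        int id;
        struct list_head list;
};

static struct kmem_cache *foo_cache;

static int foo_cache_init(void)
{
        foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
                                      SLAB_HWCACHE_ALIGN, NULL);
        return foo_cache ? 0 : -ENOMEM;
}

static struct foo *foo_alloc(void)
{
        /* Served from the per-cpu slab fastpath whenever possible. */
        return kmem_cache_alloc(foo_cache, GFP_KERNEL);
}

static void foo_free(struct foo *f)
{
        kmem_cache_free(foo_cache, f);
}
#endif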

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
        void *ret;

        flags |= (__GFP_COMP | __GFP_KMEMCG);
        ret = (void *) __get_free_pages(flags, order);
        kmemleak_alloc(ret, size, 1, flags);
        return ret;
}

/**
 * verify_mem_not_deleted - check that allocated memory is still in use
 * @x: pointer to the allocated memory
 *
 * Calling this on allocated memory will check that the memory
 * is expected to be in use, and print warnings if not.
 */
#ifdef CONFIG_SLUB_DEBUG
extern bool verify_mem_not_deleted(const void *x);
#else
static inline bool verify_mem_not_deleted(const void *x)
{
        return true;
}
#endif

#ifdef CONFIG_TRACING
extern void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
        return kmem_cache_alloc(s, gfpflags);
}

static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
        return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
        unsigned int order = get_order(size);
        return kmalloc_order_trace(size, flags, order);
}

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
        if (__builtin_constant_p(size)) {
                /* Constant size: too big for the kmalloc caches? */
                if (size > KMALLOC_MAX_CACHE_SIZE)
                        return kmalloc_large(size, flags);

                if (!(flags & GFP_DMA)) {
                        /* Pick the kmalloc cache at compile time. */
                        int index = kmalloc_index(size);

                        if (!index)
                                return ZERO_SIZE_PTR;

                        return kmem_cache_alloc_trace(kmalloc_caches[index],
                                        flags, size);
                }
        }
        /* Non-constant size (or GFP_DMA): take the generic path. */
        return __kmalloc(size, flags);
}
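
/*
 * Editorial sketch, not part of the original header: with a compile-time
 * constant size, kmalloc() above resolves straight to a kmalloc_caches[]
 * slot; with a runtime size it falls back to __kmalloc().  kfree() is
 * declared in <linux/slab.h>; kmalloc_example() is made up for illustration.
 */
#if 0 /* illustrative only */
static void kmalloc_example(size_t runtime_len)
{
        void *a, *b;

        /* Constant size: cache index chosen at compile time. */
        a = kmalloc(64, GFP_KERNEL);

        /* Runtime size: generic __kmalloc() path. */
        b = kmalloc(runtime_len, GFP_KERNEL);

        kfree(a);
        kfree(b);
}
#endif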

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                           gfp_t gfpflags,
                                           int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
                              gfp_t gfpflags,
                              int node, size_t size)
{
        return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
        if (__builtin_constant_p(size) &&
                size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
                int index = kmalloc_index(size);

                if (!index)
                        return ZERO_SIZE_PTR;

                return kmem_cache_alloc_node_trace(kmalloc_caches[index],
                               flags, node, size);
        }
        return __kmalloc_node(size, flags, node);
}
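
/*
 * Editorial sketch, not part of the original header: node-aware allocation,
 * e.g. placing a buffer on a device's local NUMA node.  alloc_on_node() is
 * made up for illustration.
 */
#if 0 /* illustrative only */
static void *alloc_on_node(int nid)
{
        /* Prefers node nid but may fall back to other nodes. */
        return kmalloc_node(512, GFP_KERNEL, nid);
}
#endif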
#endif

#endif /* _LINUX_SLUB_DEF_H */