linux/include/linux/slub_def.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
#include <linux/local_lock.h>

enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
        ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
        FREE_FASTPATH,          /* Free to cpu slab */
        FREE_SLOWPATH,          /* Freeing not to cpu slab */
        FREE_FROZEN,            /* Freeing to frozen slab */
        FREE_ADD_PARTIAL,       /* Freeing moves slab to partial list */
        FREE_REMOVE_PARTIAL,    /* Freeing removes last object */
        ALLOC_FROM_PARTIAL,     /* Cpu slab acquired from node partial list */
        ALLOC_SLAB,             /* Cpu slab acquired from page allocator */
        ALLOC_REFILL,           /* Refill cpu slab from slab freelist */
        ALLOC_NODE_MISMATCH,    /* Switching cpu slab */
        FREE_SLAB,              /* Slab freed to the page allocator */
        CPUSLAB_FLUSH,          /* Abandoning of the cpu slab */
        DEACTIVATE_FULL,        /* Cpu slab was full when deactivated */
        DEACTIVATE_EMPTY,       /* Cpu slab was empty when deactivated */
        DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
        DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
        DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
        DEACTIVATE_BYPASS,      /* Implicit deactivation */
        ORDER_FALLBACK,         /* Number of times fallback was necessary */
        CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
        CMPXCHG_DOUBLE_FAIL,    /* Number of times that cmpxchg double did not match */
        CPU_PARTIAL_ALLOC,      /* Used cpu partial on alloc */
        CPU_PARTIAL_FREE,       /* Refill cpu partial on free */
        CPU_PARTIAL_NODE,       /* Refill cpu partial from node partial */
        CPU_PARTIAL_DRAIN,      /* Drain cpu partial to node partial */
        NR_SLUB_STAT_ITEMS };
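
/*
 * When CONFIG_SLUB_STATS is enabled, each item above is kept as a per-cpu
 * counter in struct kmem_cache_cpu (below) and exported as its own file
 * under /sys/kernel/slab/<cache>/.  As a rough sketch (the real helper
 * lives in mm/slub.c; this is not the verbatim implementation), the
 * counters are bumped like so:
 *
 *        static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *        {
 *        #ifdef CONFIG_SLUB_STATS
 *                raw_cpu_inc(s->cpu_slab->stat[si]);
 *        #endif
 *        }
 */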

/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
        void **freelist;        /* Pointer to next available object */
        unsigned long tid;      /* Globally unique transaction id */
        struct page *page;      /* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
        struct page *partial;   /* Partially allocated frozen slabs */
#endif
        local_lock_t lock;      /* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
        unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
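
/*
 * Rough sketch of how the fastpaths in mm/slub.c consume this structure
 * (illustrative, not the exact code): on configurations with a double-word
 * cmpxchg, freelist and tid are read and then updated as a pair with
 * this_cpu_cmpxchg_double(), so that preemption, migration or an interrupt
 * in between shows up as a tid mismatch and the operation is retried:
 *
 *        object = c->freelist;
 *        tid = c->tid;
 *        if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *                                     object, tid,
 *                                     next_object, next_tid(tid)))
 *                goto redo;
 */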

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)          ((c)->partial)

#define slub_set_percpu_partial(c, p)           \
({                                              \
        slub_percpu_partial(c) = (p)->next;     \
})

#define slub_percpu_partial_read_once(c)     READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)                  NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)        NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */
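
/*
 * Usage sketch (illustrative only): the slow path in mm/slub.c peeks at the
 * list with slub_percpu_partial_read_once() when it only needs to know
 * whether per-cpu partial slabs exist, and detaches the first frozen slab
 * roughly as follows:
 *
 *        page = slub_percpu_partial(c);
 *        slub_set_percpu_partial(c, page);
 *
 * which advances the per-cpu partial head to page->next.
 */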

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
        unsigned int x;
};
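
/*
 * The encoding is private to mm/slub.c: the slab order is kept in the bits
 * above OO_SHIFT and the object count in the bits below it, and the
 * oo_order()/oo_objects() helpers there unpack it roughly as:
 *
 *        order   = oo.x >> OO_SHIFT;
 *        objects = oo.x & OO_MASK;
 */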

/*
 * Slab cache management.
 */
struct kmem_cache {
        struct kmem_cache_cpu __percpu *cpu_slab;
        /* Used for retrieving partial slabs, etc. */
        slab_flags_t flags;
        unsigned long min_partial;
        unsigned int size;      /* The size of an object including metadata */
        unsigned int object_size;/* The size of an object without metadata */
        struct reciprocal_value reciprocal_size;
        unsigned int offset;    /* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
        /* Number of per cpu partial objects to keep around */
        unsigned int cpu_partial;
#endif
        struct kmem_cache_order_objects oo;

        /* Allocation and freeing of slabs */
        struct kmem_cache_order_objects max;
        struct kmem_cache_order_objects min;
        gfp_t allocflags;       /* gfp flags to use on each alloc */
        int refcount;           /* Refcount for slab cache destroy */
        void (*ctor)(void *);
        unsigned int inuse;             /* Offset to metadata */
        unsigned int align;             /* Alignment */
        unsigned int red_left_pad;      /* Left redzone padding size */
        const char *name;       /* Name (only for display!) */
        struct list_head list;  /* List of slab caches */
#ifdef CONFIG_SYSFS
        struct kobject kobj;    /* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
        unsigned long random;
#endif

#ifdef CONFIG_NUMA
        /*
         * Defragmentation by allocating from a remote node.
         */
        unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
        unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
        struct kasan_cache kasan_info;
#endif

        unsigned int useroffset;        /* Usercopy region offset */
        unsigned int usersize;          /* Usercopy region size */

        struct kmem_cache_node *node[MAX_NUMNODES];
};
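
/*
 * The node[] array is indexed by NUMA node id and may contain NULL entries.
 * mm/slab.h provides get_node() and for_each_kmem_cache_node() for walking
 * it; an equivalent open-coded sketch:
 *
 *        for (node = 0; node < nr_node_ids; node++) {
 *                struct kmem_cache_node *n = s->node[node];
 *
 *                if (!n)
 *                        continue;
 *                ...
 *        }
 */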

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)             ((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)              \
({                                              \
        slub_cpu_partial(s) = (n);              \
})
#else
#define slub_cpu_partial(s)             (0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */
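
/*
 * Usage sketch (illustrative, not the exact code): mm/slub.c picks this
 * value when a cache's sizes are calculated, keeping more partial objects
 * around for small objects and none when per-cpu partials cannot be used:
 *
 *        if (!kmem_cache_has_cpu_partial(s))
 *                slub_set_cpu_partial(s, 0);
 *        else
 *                slub_set_cpu_partial(s, nr_objects);
 *
 * where nr_objects (a placeholder here) shrinks as s->size grows.
 */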

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
                u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
                                void *x) {
        void *object = x - (x - page_address(page)) % cache->size;
        void *last_object = page_address(page) +
                (page->objects - 1) * cache->size;
        void *result = (unlikely(object > last_object)) ? last_object : object;

        result = fixup_red_left(cache, result);
        return result;
}
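
/*
 * nearest_obj() maps an arbitrary address within a slab page back to the
 * start of the object containing it, clamped to the last object and moved
 * past the left red zone by fixup_red_left() when red zoning is active.
 * Illustrative use, as in the KASAN/debug error-reporting paths:
 *
 *        void *object = nearest_obj(cache, page, bad_addr);
 */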

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
                                          void *addr, void *obj)
{
        return reciprocal_divide(kasan_reset_tag(obj) - addr,
                                 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
                                        const struct page *page, void *obj)
{
        if (is_kfence_address(obj))
                return 0;
        return __obj_to_index(cache, page_address(page), obj);
}
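
/*
 * obj_to_index() converts an object pointer into its slot number within the
 * slab page using the precomputed reciprocal, avoiding a runtime division;
 * KFENCE-managed objects report index 0 because a KFENCE slab holds a single
 * object.  Illustrative use, roughly as in the memcg object accounting code:
 *
 *        off = obj_to_index(s, page, p);
 *        page_objcgs(page)[off] = objcg;
 */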

static inline int objs_per_slab_page(const struct kmem_cache *cache,
                                     const struct page *page)
{
        return page->objects;
}
#endif /* _LINUX_SLUB_DEF_H */