linux/include/linux/slab_def.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
        struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
        unsigned int batchcount;
        unsigned int limit;
        unsigned int shared;

        unsigned int size;
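        /*
         * reciprocal_value(size), cached so that an object's index
         * within a slab (offset / size) can be computed with a multiply
         * and shift via reciprocal_divide() instead of a hardware
         * division on every free.
         */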
        struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

        slab_flags_t flags;             /* constant flags */
        unsigned int num;               /* # of objs per slab */

/* 3) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;

        /* force GFP flags, e.g. GFP_DMA */
        gfp_t allocflags;

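        /*
         * Slab colouring: successive slabs start their first object at
         * offsets 0, colour_off, 2 * colour_off, ... (wrapping after
         * 'colour' steps) so that the objects of different slabs do not
         * all compete for the same CPU cache lines.
         */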
        size_t colour;                  /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
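        /*
         * When the freelist does not fit inside the slab itself, it is
         * allocated off-slab from freelist_cache; freelist_size is the
         * size of the freelist (one index per object) in bytes.
         */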
        struct kmem_cache *freelist_cache;
        unsigned int freelist_size;

        /* constructor func */
        void (*ctor)(void *obj);

/* 4) cache creation/removal */
        const char *name;
        struct list_head list;
        int refcount;
        int object_size;
        int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
        atomic_t freemiss;
#ifdef CONFIG_DEBUG_SLAB_LEAK
        atomic_t store_user_clean;
#endif

        /*
         * If debugging is enabled, then the allocator can add additional
         * fields and/or padding to every object. 'size' contains the
         * total object size including these internal fields; obj_offset
         * gives the offset from the start of the allocation to the user
         * object, whose size is 'object_size'.
         */
        int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

#ifdef CONFIG_MEMCG
        struct memcg_cache_params memcg_params;
#endif
#ifdef CONFIG_KASAN
        struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
        unsigned int *random_seq;
#endif

        struct kmem_cache_node *node[MAX_NUMNODES];
};
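
/*
 * Illustrative sketch (not part of the original header): the allocator
 * maps an object pointer back to its index within a slab using the
 * cached reciprocal above, avoiding a division on every free. mm/slab.c
 * has a helper to this effect; the name slab_obj_to_index() here is
 * hypothetical.
 */
static inline unsigned int slab_obj_to_index(const struct kmem_cache *cache,
                                             const struct page *page,
                                             void *obj)
{
        /* byte offset of @obj from the first object in the slab */
        u32 offset = obj - page->s_mem;

        /* offset / cache->size, done as a multiply and shift */
        return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}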

/*
 * Given a pointer @x anywhere inside a slab described by @page, return
 * the start of the object containing it. @page->s_mem is the address of
 * the first object in the slab.
 */
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
                                void *x)
{
        /* round @x down to the start of its object */
        void *object = x - (x - page->s_mem) % cache->size;
        void *last_object = page->s_mem + (cache->num - 1) * cache->size;

        /* clamp pointers past the final object to the final object */
        if (unlikely(object > last_object))
                return last_object;
        else
                return object;
}
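
/*
 * Example (illustrative): debugging code such as KASAN can use
 * nearest_obj() to round an arbitrary interior pointer down to the
 * object that contains it:
 *
 *      void *obj = nearest_obj(cache, virt_to_head_page(ptr), ptr);
 */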

#endif  /* _LINUX_SLAB_DEF_H */