linux/include/linux/slab_def.h
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by slab_mutex */
        unsigned int batchcount;
        unsigned int limit;
        unsigned int shared;

        unsigned int size;
        struct reciprocal_value reciprocal_buffer_size;
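        /*
         * reciprocal_buffer_size caches a precomputed reciprocal of
         * 'size' (see linux/reciprocal_div.h) so that the hot path can
         * turn the per-object division "offset / size" into a multiply
         * and a shift; a sketch of the idea follows this struct.
         */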
/* 2) touched by every alloc & free from the backend */

        unsigned int flags;             /* constant flags */
        unsigned int num;               /* # of objs per slab */

/* 3) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;
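        /*
         * A slab therefore spans PAGE_SIZE << gfporder bytes; 'num'
         * above is how many 'size'-byte objects fit in that span once
         * freelist metadata and colouring are accounted for.
         */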

        /* force GFP flags, e.g. GFP_DMA */
        gfp_t allocflags;

        size_t colour;                  /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
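        /*
         * Colouring staggers object placement: successive slabs start
         * their objects at offsets 0, colour_off, 2 * colour_off, ...,
         * cycling modulo 'colour', so that equivalent objects in
         * different slabs do not all compete for the same cache lines.
         */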
        struct kmem_cache *freelist_cache;
        unsigned int freelist_size;
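        /*
         * The freelist is a per-slab array of object indices,
         * freelist_size bytes long; for off-slab caches it is allocated
         * from freelist_cache instead of being stored in the slab itself.
         */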

        /* constructor func */
        void (*ctor)(void *obj);

/* 4) cache creation/removal */
        const char *name;
        struct list_head list;
        int refcount;
        int object_size;
        int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
        atomic_t freemiss;

        /*
         * If debugging is enabled, then the allocator can add additional
         * fields and/or padding to every object. 'size' contains the total
         * object size including these internal fields; obj_offset below is
         * the offset from the start of that storage to the user-visible
         * object (whose size is object_size).
         */
        int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
        struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
        /*
         * We put array[] at the end of kmem_cache, because we want to size
         * this array to nr_cpu_ids slots instead of NR_CPUS
         * (see kmem_cache_init()).
         * We still use [NR_CPUS] and not [1] or [0] because cache_cache
         * is statically defined, so we reserve the max number of cpus.
         *
         * We also need to guarantee that the list is able to accommodate a
         * pointer for each node, since "node" uses the remainder of the
         * available pointers.
         */
        struct kmem_cache_node **node;
        struct array_cache *array[NR_CPUS + MAX_NUMNODES];
        /*
         * Do not add fields after array[]
         */
};
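
/*
 * Illustrative sketch, not part of the original header: mm/slab.c uses
 * reciprocal_buffer_size to map an object pointer back to its index
 * within a slab without a hardware divide.  This mirrors obj_to_index()
 * in mm/slab.c; page->s_mem (the address of the slab's first object)
 * and struct page come from headers not included here.
 */
static inline unsigned int slab_obj_to_index(const struct kmem_cache *cache,
                                             const struct page *page,
                                             void *obj)
{
        /* byte offset of obj from the first object in the slab */
        u32 offset = obj - page->s_mem;

        /* offset / cache->size, computed as a multiply and shift */
        return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}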

#endif  /* _LINUX_SLAB_DEF_H */
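
/*
 * Usage sketch (hypothetical caller, not part of this header): the
 * fields above are filled in when a cache is created.  The constructor
 * passed here becomes kmem_cache->ctor and runs once per object when a
 * fresh slab is populated, not on every allocation.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_obj {                    /* hypothetical object type */
        int refs;
        void *data;
};

static struct kmem_cache *example_cachep;

static void example_ctor(void *obj)
{
        struct example_obj *e = obj;

        e->refs = 0;
        e->data = NULL;
}

static int __init example_init(void)
{
        /* name, object_size, align, flags, ctor */
        example_cachep = kmem_cache_create("example_obj",
                                           sizeof(struct example_obj),
                                           0, SLAB_HWCACHE_ALIGN,
                                           example_ctor);
        return example_cachep ? 0 : -ENOMEM;
}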