linux/mm/slab.h
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in its definition of kmem_cache.
 *
 * Once we can use anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structures of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
        unsigned int object_size;/* The original size of the object */
        unsigned int size;      /* The aligned/padded/added on size */
        unsigned int align;     /* Alignment as calculated */
        unsigned long flags;    /* Active flags on the slab */
        const char *name;       /* Slab name for sysfs */
        int refcount;           /* Use counter */
        void (*ctor)(void *);   /* Called on object slot creation */
        struct list_head list;  /* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;
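
/*
 * Illustrative sketch (not part of this header): early code can gate
 * slab-dependent work on how far bootstrap has progressed, for example
 *
 *      if (slab_state >= UP)
 *              cache = kmem_cache_create("example", size, 0, 0, NULL);
 *
 * The cache name and parameters above are hypothetical.
 */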

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
                unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
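
/*
 * Illustrative sketch: kmalloc_slab() maps a requested allocation size to
 * the kmalloc cache that would back it, e.g. (size and flags hypothetical)
 *
 *      struct kmem_cache *s = kmalloc_slab(64, GFP_KERNEL);
 */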


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
                        unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
                unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
                   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
        unsigned long flags, const char *name,
        void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
                   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
{
        return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
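
/*
 * Illustrative sketch (assumed caller behaviour, not defined here): a cache
 * creation path can use the mask to strip flag bits that are not legal for
 * the current configuration, e.g.
 *
 *      flags &= CACHE_CREATE_MASK;
 */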

int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

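/*
 * Per-cache statistics, as reported through /proc/slabinfo; each allocator
 * fills this in via its get_slabinfo() implementation declared below.
 */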
struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);

#ifdef CONFIG_MEMCG_KMEM
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
        list_for_each_entry(iter, &(root)->memcg_params.list, \
                            memcg_params.list)

#define for_each_memcg_cache_safe(iter, tmp, root) \
        list_for_each_entry_safe(iter, tmp, &(root)->memcg_params.list, \
                                 memcg_params.list)
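
/*
 * Illustrative sketch of the iterators above (variable names are
 * hypothetical); slab_mutex must be held across the walk:
 *
 *      struct kmem_cache *c;
 *
 *      mutex_lock(&slab_mutex);
 *      for_each_memcg_cache(c, root_cache)
 *              pr_info("child cache: %s\n", c->name);
 *      mutex_unlock(&slab_mutex);
 */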

static inline bool is_root_cache(struct kmem_cache *s)
{
        return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return p == s || p == s->memcg_params.root_cache;
}

/*
 * Per-memcg caches get a suffix appended to their name because two caches
 * in the system cannot share the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
        if (!is_root_cache(s))
                s = s->memcg_params.root_cache;
        return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
        struct kmem_cache *cachep;
        struct memcg_cache_array *arr;

        rcu_read_lock();
        arr = rcu_dereference(s->memcg_params.memcg_caches);

        /*
         * Make sure we will access the up-to-date value. The code updating
         * memcg_caches issues a write barrier to match this (see
         * memcg_create_kmem_cache()).
         */
        cachep = lockless_dereference(arr->entries[idx]);
        rcu_read_unlock();

        return cachep;
}
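
/*
 * Illustrative sketch (hypothetical caller): the pointer returned above is
 * only stable while the memcg's cache is pinned as described, e.g.
 *
 *      mutex_lock(&slab_mutex);
 *      c = cache_from_memcg_idx(root_cache, idx);
 *      if (c)
 *              ... use c ...
 *      mutex_unlock(&slab_mutex);
 */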

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        if (is_root_cache(s))
                return s;
        return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
                                             gfp_t gfp, int order)
{
        if (!memcg_kmem_enabled())
                return 0;
        if (is_root_cache(s))
                return 0;
        return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
        if (!memcg_kmem_enabled())
                return;
        if (is_root_cache(s))
                return;
        memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order);
}

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* !CONFIG_MEMCG_KMEM */

#define for_each_memcg_cache(iter, root) \
        for ((void)(iter), (void)(root); 0; )
#define for_each_memcg_cache_safe(iter, tmp, root) \
        for ((void)(iter), (void)(tmp), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
        return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
        return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
        return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
        return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;
        struct page *page;

        /*
         * When kmemcg is not being used, both assignments should return the
         * same value, but we don't want to pay the assignment price in that
         * case. If it is not compiled in, the compiler should be smart enough
         * to not do even the assignment. In that case, slab_equal_or_root
         * will also be a constant.
         */
        if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
                return s;

        page = virt_to_head_page(x);
        cachep = page->slab_cache;
        if (slab_equal_or_root(cachep, s))
                return cachep;

        pr_err("%s: Wrong slab cache. %s but object is from %s\n",
               __func__, cachep->name, s->name);
        WARN_ON_ONCE(1);
        return s;
}
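
/*
 * Illustrative, simplified sketch of how an allocator's free path might use
 * the helper above (not the actual implementation):
 *
 *      void kmem_cache_free(struct kmem_cache *s, void *x)
 *      {
 *              s = cache_from_obj(s, x);
 *              ... hand x back to s ...
 *      }
 */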

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
        spinlock_t list_lock;

#ifdef CONFIG_SLAB
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct alien_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
        for (__node = 0; __node < nr_node_ids; __node++) \
                 if ((__n = get_node(__s, __node)))

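/*
 * Illustrative sketch of the node iterator above (variable names are
 * hypothetical):
 *
 *      struct kmem_cache_node *n;
 *      int node;
 *
 *      for_each_kmem_cache_node(s, node, n)
 *              pr_info("node %d: %p\n", node, n);
 */
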
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */