linux/mm/slab.h
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_ARRAYCACHE,     /* SLAB: kmalloc size for arraycache available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};
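/*
 * Illustrative sketch, not part of the upstream header: callers normally
 * gate on the boot state rather than open-coding comparisons; the helper
 * slab_is_available() in include/linux/slab.h performs essentially this
 * check against the global slab_state. The function below is hypothetical
 * and only shows the intended ordering of the states.
 */
static inline int slab_state_usable_sketch(enum slab_state state)
{
        /* Caches can serve regular allocations once the allocator is UP. */
        return state >= UP;
}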

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
                unsigned long align, unsigned long size);

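/*
 * Hedged sketch of the alignment policy behind calculate_alignment() (the
 * real body lives in mm/slab_common.c and may differ in detail): honour
 * SLAB_HWCACHE_ALIGN only for objects large enough to benefit, never drop
 * below the architecture minimum, and round up to a pointer-sized multiple.
 * The _sketch suffix marks this as illustrative, not the in-tree function.
 */
static inline unsigned long calculate_alignment_sketch(unsigned long flags,
                unsigned long align, unsigned long size)
{
        if (flags & SLAB_HWCACHE_ALIGN) {
                unsigned long ralign = cache_line_size();

                /* Small objects need only a fraction of a cache line. */
                while (size <= ralign / 2)
                        ralign /= 2;
                align = max(align, ralign);
        }

        if (align < ARCH_SLAB_MINALIGN)
                align = ARCH_SLAB_MINALIGN;

        /* Object alignment must at least fit a pointer. */
        return ALIGN(align, sizeof(void *));
}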
#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
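/*
 * Hedged sketch of the size-to-cache mapping done by kmalloc_slab() on the
 * !CONFIG_SLOB path: large requests are rounded up to the next power of two
 * with fls() and used to index the kmalloc_caches[] array populated by
 * create_kmalloc_caches(). The real lookup also consults a small table for
 * sizes up to 192 bytes so the 96- and 192-byte caches are used; that path
 * is omitted here and the function is illustrative only.
 */
static inline struct kmem_cache *kmalloc_slab_sketch(size_t size)
{
        unsigned int index;

        if (!size)
                return NULL;

        index = fls(size - 1);          /* round up to a power of two */
        return kmalloc_caches[index];
}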

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
                        unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        size_t size, unsigned long flags);

struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
                   unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
                   unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif
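/*
 * Hedged usage sketch: kmem_cache_create() in mm/slab_common.c asks the
 * allocator for an alias before building a new cache, which lets SLUB merge
 * compatible caches while SLAB and SLOB simply return NULL above. The helper
 * name below is hypothetical; only the call pattern is the point.
 */
static inline struct kmem_cache *
find_mergeable_or_null_sketch(const char *name, size_t size, size_t align,
                              unsigned long flags, void (*ctor)(void *))
{
        struct kmem_cache *s;

        mutex_lock(&slab_mutex);
        s = __kmem_cache_alias(name, size, align, flags, ctor);
        mutex_unlock(&slab_mutex);

        /* NULL means no compatible cache exists and a new one must be created. */
        return s;
}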

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
                          SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
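/*
 * Hedged usage sketch: the common cache-creation path masks caller-supplied
 * flags with CACHE_CREATE_MASK so that only flags meaningful for the
 * compiled-in allocator and debug options reach allocator-specific code.
 * The helper name is hypothetical.
 */
static inline unsigned long sanitize_cache_flags_sketch(unsigned long flags)
{
        return flags & CACHE_CREATE_MASK;
}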

int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);
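/*
 * Hedged sketch of how the hooks above feed /proc/slabinfo: common code
 * fills a struct slabinfo via get_slabinfo() and prints one line per cache,
 * then appends allocator-specific statistics. The exact field order and
 * format strings of the real mm/slab_common.c printer may differ; the
 * function name here is hypothetical.
 */
static inline void cache_show_sketch(struct kmem_cache *s, struct seq_file *m)
{
        struct slabinfo sinfo;

        memset(&sinfo, 0, sizeof(sinfo));
        get_slabinfo(s, &sinfo);

        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
                   s->name, sinfo.active_objs, sinfo.num_objs,
                   s->size, sinfo.objects_per_slab, 1 << sinfo.cache_order);
        slabinfo_show_stats(m, s);
        seq_putc(m, '\n');
}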

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
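/*
 * Hedged sketch of the generic fallback behind these hooks (mm/slab_common.c
 * carries something very close to this): allocate the objects one by one and
 * unwind on failure so the caller sees all-or-nothing; bulk free is just a
 * loop. Allocators override the hooks only when they can batch the work more
 * efficiently.
 */
static inline int kmem_cache_alloc_bulk_sketch(struct kmem_cache *s, gfp_t flags,
                                               size_t nr, void **p)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                p[i] = kmem_cache_alloc(s, flags);
                if (!p[i]) {
                        /* Give back what we already got before reporting failure. */
                        __kmem_cache_free_bulk(s, i, p);
                        return 0;
                }
        }
        return i;
}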

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
        return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
        if (!is_root_cache(s))
                atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
        if (!is_root_cache(s))
                atomic_sub(1 << order, &s->memcg_params->nr_pages);
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                        struct kmem_cache *p)
{
        return (p == s) ||
                (s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * We append a suffix to the name of memcg caches because two caches in the
 * system cannot share the same name. But when we print them locally, it is
 * better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
        if (!is_root_cache(s))
                return s->memcg_params->root_cache->name;
        return s->name;
}

/*
 * Note, RCU protects only the memcg_caches array, not the per-memcg caches
 * themselves. The caller must therefore ensure that the memcg's cache won't
 * go away. Since a memcg's cache, once created, is destroyed only along with
 * its root cache, this holds if we are going to allocate from the cache or
 * hold a reference to the root cache by other means. Otherwise, we should
 * hold either the slab_mutex or the memcg's slab_caches_mutex while calling
 * this function and accessing the returned value.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
        struct kmem_cache *cachep;
        struct memcg_cache_params *params;

        if (!s->memcg_params)
                return NULL;

        rcu_read_lock();
        params = rcu_dereference(s->memcg_params);
        cachep = params->memcg_caches[idx];
        rcu_read_unlock();

        /*
         * Make sure we will access the up-to-date value. The code updating
         * memcg_caches issues a write barrier to match this (see
         * memcg_register_cache()).
         */
        smp_read_barrier_depends();
        return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        if (is_root_cache(s))
                return s;
        return s->memcg_params->root_cache;
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
                                             gfp_t gfp, int order)
{
        if (!memcg_kmem_enabled())
                return 0;
        if (is_root_cache(s))
                return 0;
        return memcg_charge_kmem(s->memcg_params->memcg, gfp, 1 << order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
        if (!memcg_kmem_enabled())
                return;
        if (is_root_cache(s))
                return;
        memcg_uncharge_kmem(s->memcg_params->memcg, 1 << order);
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
        return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
        return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
        return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
        return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}
#endif
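/*
 * Hedged sketch of how an allocator uses the memcg helpers above when it
 * backs a cache with fresh pages: charge the owning memcg first, allocate,
 * and undo the charge if the page allocation fails. The real SLAB/SLUB call
 * sites differ in structure; the function name here is hypothetical.
 */
static inline struct page *alloc_slab_pages_sketch(struct kmem_cache *s,
                                                   gfp_t flags, int order)
{
        struct page *page;

        if (memcg_charge_slab(s, flags, order))
                return NULL;

        page = alloc_pages(flags, order);
        if (!page) {
                memcg_uncharge_slab(s, order);
                return NULL;
        }

        /* Account the pages to the (non-root) cache for later release. */
        memcg_bind_pages(s, order);
        return page;
}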

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;
        struct page *page;

        /*
         * When kmemcg is not being used, both assignments should return the
         * same value, but we don't want to pay the assignment price in that
         * case. If it is not compiled in, the compiler should be smart enough
         * to not do even the assignment. In that case, slab_equal_or_root
         * will also be a constant.
         */
        if (!memcg_kmem_enabled() &&
            !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
                return s;

        page = virt_to_head_page(x);
        cachep = page->slab_cache;
        if (slab_equal_or_root(cachep, s))
                return cachep;

        pr_err("%s: Wrong slab cache. %s but object is from %s\n",
               __func__, s->name, cachep->name);
        WARN_ON_ONCE(1);
        return s;
}
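/*
 * Hedged sketch of the intended call site: the kmem_cache_free() paths
 * re-derive the cache from the object first, so freeing through a memcg
 * clone (or with a stale cache pointer) still targets the slab the object
 * actually lives on. Only the opening of the free path is shown; the helper
 * name is hypothetical.
 */
static inline void kmem_cache_free_entry_sketch(struct kmem_cache *cachep,
                                                void *objp)
{
        cachep = cache_from_obj(cachep, objp);

        /* ... allocator-specific freeing of objp into cachep follows ... */
}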

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
        spinlock_t list_lock;

#ifdef CONFIG_SLAB
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct array_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};
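/*
 * Hedged sketch of the locking rule encoded in kmem_cache_node: walks of the
 * per-node lists must hold list_lock with interrupts disabled, mirroring how
 * SLUB's own partial-list accounting iterates n->partial. The helper below is
 * hypothetical and only counts slabs on the partial list.
 */
#ifdef CONFIG_SLUB
static inline unsigned long count_partial_slabs_sketch(struct kmem_cache_node *n)
{
        struct page *page;
        unsigned long flags;
        unsigned long nr = 0;

        spin_lock_irqsave(&n->list_lock, flags);
        list_for_each_entry(page, &n->partial, lru)
                nr++;
        spin_unlock_irqrestore(&n->list_lock, flags);

        return nr;
}
#endif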