linux/mm/slab.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size  */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
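
/*
 * Illustrative sketch (not part of this header, names made up): bootstrap
 * code advances slab_state as more infrastructure becomes usable, roughly:
 *
 *	void __init example_kmem_cache_init(void)
 *	{
 *		slab_state = DOWN;	// nothing usable yet
 *		// ... create the boot caches that back struct kmem_cache ...
 *		slab_state = PARTIAL;	// node structures can now be allocated
 *		// ... create the kmalloc array ...
 *		slab_state = UP;	// kmem_cache_alloc()/kmalloc() work
 *		// sysfs/debugfs registration comes later, then:
 *		slab_state = FULL;
 *	}
 *
 * The real transitions live in the allocators' init code; the function above
 * is only an illustration of the intended ordering.
 */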

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned int size;
} kmalloc_info[];
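
/*
 * Illustrative example (values assumed, not quoted from the table): entry i
 * of kmalloc_info[] pairs a cache name with its object size, so creation
 * code can do something like:
 *
 *	kmalloc_caches[i] = create_kmalloc_cache(kmalloc_info[i].name,
 *						 kmalloc_info[i].size, flags,
 *						 0, kmalloc_info[i].size);
 *
 * e.g. an entry such as { "kmalloc-64", 64 } would yield the kmalloc-64 cache.
 */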

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
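
/*
 * Usage sketch (illustrative only): kmalloc() resolves a request to one of
 * the fixed-size caches, conceptually:
 *
 *	struct kmem_cache *s = kmalloc_slab(48, GFP_KERNEL);
 *	// a 48-byte request falls into the 64-byte size class, so s is
 *	// (roughly) the "kmalloc-64" cache; the GFP flags select the DMA
 *	// variant when __GFP_DMA is set.
 */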


/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
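
/*
 * Illustrative note on cache merging (behaviour sketched here, not
 * specified by this header): kmem_cache_create() may return an alias of an
 * existing compatible cache instead of building a new one. Conceptually:
 *
 *	s = find_mergeable(size, align, flags, name, ctor);
 *	if (s)
 *		s->refcount++;		// reuse the existing cache
 *
 * Caches with a constructor, usercopy regions or debug flags are typically
 * rejected by slab_unmergeable().
 */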
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
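
/*
 * Illustrative sketch of how these masks are typically used (the exact
 * checks live in mm/slab_common.c, not in this header): creation-time flags
 * outside SLAB_FLAGS_PERMITTED are a caller bug, while flags that the
 * current configuration cannot honour are simply masked off:
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		return NULL;		// reject unknown/allocator-specific flags
 *	flags &= CACHE_CREATE_MASK;	// keep what this build supports
 */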

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
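
/*
 * Minimal sketch of the generic fallback (the real versions live in
 * mm/slab_common.c): bulk operations degrade to a loop over the regular
 * per-object entry points, undoing partial work on failure.
 *
 *	int example_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 *			       size_t nr, void **p)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < nr; i++) {
 *			p[i] = kmem_cache_alloc(s, flags);
 *			if (!p[i]) {
 *				__kmem_cache_free_bulk(s, i, p);
 *				return 0;	// report zero objects allocated
 *			}
 *		}
 *		return i;
 *	}
 */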

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
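
/*
 * Usage sketch (illustrative): walking the per-memcg children of a root
 * cache, for instance to shrink them all. The locking requirement is the
 * important part; the body below is made up.
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root)
 *		__kmem_cache_shrink(c);
 *	mutex_unlock(&slab_mutex);
 */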

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}
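
/*
 * Example (the suffix format is shown for illustration only): a root cache
 * named "dentry" may have a per-memcg child registered under something like
 * "dentry(123:mygroup)"; cache_name() reports "dentry" for both.
 */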

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = READ_ONCE(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;
	memcg_kmem_uncharge(page, order);
}
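
/*
 * Illustrative call pattern (simplified from what the allocators do when
 * they acquire a new slab page): charge when the page is allocated, back
 * the page out if the charge fails, and uncharge when the slab is freed.
 *
 *	page = alloc_pages_node(node, gfp, order);
 *	if (page && memcg_charge_slab(page, gfp, order, s)) {
 *		__free_pages(page, order);
 *		page = NULL;		// treat as an allocation failure
 *	}
 *	...
 *	memcg_uncharge_slab(page, order, s);	// when the slab is released
 */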

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
				void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}
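
/*
 * Usage sketch (illustrative; the real callers are the allocators' free
 * paths): kmem_cache_free() re-derives the cache from the object so that a
 * free against the wrong cache is detected rather than silently corrupting
 * a freelist.
 *
 *	void example_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);	// warns if x is not from s
 *		// ... hand x back to s's freelist ...
 *	}
 */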

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}
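
/*
 * Worked example (illustrative): kmalloc(52, GFP_KERNEL) is served from the
 * 64-byte size class, so ksize() on the returned object reports 64 even
 * though only 52 bytes were requested; slab_ksize() is what provides that
 * per-cache usable size. With red zoning, poisoning or KASAN the padding
 * belongs to the debugging machinery, so only object_size is reported.
 */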

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
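
/*
 * Illustrative skeleton of how the allocators bracket an allocation with
 * these hooks (simplified; the real fast paths are in the allocators):
 *
 *	static void *example_slab_alloc(struct kmem_cache *s, gfp_t gfpflags)
 *	{
 *		void *object;
 *
 *		s = slab_pre_alloc_hook(s, gfpflags);
 *		if (!s)
 *			return NULL;	// fault injection hit
 *		object = ...;		// allocator-specific fast/slow path
 *		slab_post_alloc_hook(s, gfpflags, 1, &object);
 *		return object;
 *	}
 */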

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

#endif
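
/*
 * Usage sketch (illustrative): summing per-node partial slab counts for a
 * cache. The variable names are arbitrary; only the iteration pattern and
 * the need for a kmem_cache_node pointer matter.
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;	// SLUB field, as an example
 */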

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
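
/*
 * Illustrative note on freelist randomization (behaviour sketched, details
 * live in the allocators): cache_random_seq_create() fills a per-cache
 * random permutation of object indexes, which the allocator then uses to
 * link a new slab's free objects in shuffled rather than linear order,
 * conceptually:
 *
 *	// random_seq holds a permutation of the object indexes
 *	for (i = 0; i < count; i++)
 *		add_object_to_freelist(slab, s->random_seq[i]);
 *
 * add_object_to_freelist() is a made-up helper; the point is only the use
 * of the precomputed permutation.
 */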

#endif /* MM_SLAB_H */