/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
        unsigned int object_size;/* The original size of the object */
        unsigned int size;      /* The aligned/padded/added on size */
        unsigned int align;     /* Alignment as calculated */
        slab_flags_t flags;     /* Active flags on the slab */
        unsigned int useroffset;/* Usercopy region offset */
        unsigned int usersize;  /* Usercopy region size */
        const char *name;       /* Slab name for sysfs */
        int refcount;           /* Use counter */
        void (*ctor)(void *);   /* Called on object slot creation */
        struct list_head list;  /* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name[NR_KMALLOC_TYPES];
        unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
                        slab_flags_t flags, unsigned int useroffset,
                        unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        unsigned int size, slab_flags_t flags,
                        unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
                slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name)
{
        return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
                          SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_MEM_SPREAD | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the list of objects
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
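
/*
 * Illustrative sketch (not part of the original header): a generic fallback
 * for the bulk interfaces can simply loop over the listed objects, roughly
 * as below; the per-allocator implementations may batch more aggressively.
 *
 *      void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 *      {
 *              size_t i;
 *
 *              for (i = 0; i < nr; i++)
 *                      kmem_cache_free(s, p[i]);
 *      }
 */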

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
        return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
        return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
        if (IS_ENABLED(CONFIG_SLUB_DEBUG))
                VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
        if (__slub_debug_enabled())
                return s->flags & flags;
        return false;
}
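
/*
 * Illustrative usage (not part of the original header): callers pass one or
 * more bits from SLAB_DEBUG_FLAGS and take a slow debug path only when that
 * flag is active for the cache, e.g.
 *
 *      if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 *              run_consistency_checks(s, object);      (hypothetical helper)
 *
 * Passing a flag outside SLAB_DEBUG_FLAGS trips the VM_WARN_ON_ONCE() above.
 */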

#ifdef CONFIG_MEMCG_KMEM
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
                                 gfp_t gfp, bool new_page);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
                     enum node_stat_item idx, int nr);

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
        kfree(page_objcgs(page));
        page->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
        /*
         * For each accounted object there is an extra space which is used
         * to store obj_cgroup membership. Charge it too.
         */
        return s->size + sizeof(struct obj_cgroup *);
}

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        struct obj_cgroup *objcg;

        if (!memcg_kmem_enabled())
                return true;

        if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
                return true;

        objcg = get_obj_cgroup_from_current();
        if (!objcg)
                return true;

        if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
                obj_cgroup_put(objcg);
                return false;
        }

        *objcgp = objcg;
        return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
        struct page *page;
        unsigned long off;
        size_t i;

        if (!memcg_kmem_enabled() || !objcg)
                return;

        for (i = 0; i < size; i++) {
                if (likely(p[i])) {
                        page = virt_to_head_page(p[i]);

                        if (!page_objcgs(page) &&
                            memcg_alloc_page_obj_cgroups(page, s, flags,
                                                         false)) {
                                obj_cgroup_uncharge(objcg, obj_full_size(s));
                                continue;
                        }

                        off = obj_to_index(s, page, p[i]);
                        obj_cgroup_get(objcg);
                        page_objcgs(page)[off] = objcg;
                        mod_objcg_state(objcg, page_pgdat(page),
                                        cache_vmstat_idx(s), obj_full_size(s));
                } else {
                        obj_cgroup_uncharge(objcg, obj_full_size(s));
                }
        }
        obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
                                        void **p, int objects)
{
        struct kmem_cache *s;
        struct obj_cgroup **objcgs;
        struct obj_cgroup *objcg;
        struct page *page;
        unsigned int off;
        int i;

        if (!memcg_kmem_enabled())
                return;

        for (i = 0; i < objects; i++) {
                if (unlikely(!p[i]))
                        continue;

                page = virt_to_head_page(p[i]);
                objcgs = page_objcgs_check(page);
                if (!objcgs)
                        continue;

                if (!s_orig)
                        s = page->slab_cache;
                else
                        s = s_orig;

                off = obj_to_index(s, page, p[i]);
                objcg = objcgs[off];
                if (!objcg)
                        continue;

                objcgs[off] = NULL;
                obj_cgroup_uncharge(objcg, obj_full_size(s));
                mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
                                -obj_full_size(s));
                obj_cgroup_put(objcg);
        }
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
        return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
                                               struct kmem_cache *s, gfp_t gfp,
                                               bool new_page)
{
        return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
                                        void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
        struct page *page;

        page = virt_to_head_page(obj);
        if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
                                        __func__))
                return NULL;
        return page->slab_cache;
}

static __always_inline void account_slab_page(struct page *page, int order,
                                              struct kmem_cache *s,
                                              gfp_t gfp)
{
        if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
                memcg_alloc_page_obj_cgroups(page, s, gfp, true);

        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                            PAGE_SIZE << order);
}

static __always_inline void unaccount_slab_page(struct page *page, int order,
                                                struct kmem_cache *s)
{
        if (memcg_kmem_enabled())
                memcg_free_page_obj_cgroups(page);

        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                            -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;

        if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
            !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
                return s;

        cachep = virt_to_cache(x);
        if (WARN(cachep && cachep != s,
                  "%s: Wrong slab cache. %s but object is from %s\n",
                  __func__, s->name, cachep->name))
                print_tracking(cachep, x);
        return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
        return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
# endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we have the need to store the freelist pointer
         * back there or track user information then we can
         * only use the space before that information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation
         */
        return s->size;
#endif
}
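
/*
 * Illustrative example (not part of the original header): for a hypothetical
 * SLUB cache whose object_size is 100 but whose padded/aligned size is 128,
 * and which carries no red-zoning, poisoning, KASAN, RCU or user-tracking
 * metadata, slab_ksize() returns 128, i.e. callers such as ksize() may use
 * the padding beyond object_size.
 */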

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     struct obj_cgroup **objcgp,
                                                     size_t size, gfp_t flags)
{
        flags &= gfp_allowed_mask;

        might_alloc(flags);

        if (should_failslab(s, flags))
                return NULL;

        if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
                return NULL;

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
                                        struct obj_cgroup *objcg, gfp_t flags,
                                        size_t size, void **p, bool init)
{
        size_t i;

        flags &= gfp_allowed_mask;

        /*
         * As memory initialization might be integrated into KASAN,
         * kasan_slab_alloc and initialization memset must be
         * kept together to avoid discrepancies in behavior.
         *
         * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
         */
        for (i = 0; i < size; i++) {
                p[i] = kasan_slab_alloc(s, p[i], flags, init);
                if (p[i] && init && !kasan_has_integrated_init())
                        memset(p[i], 0, s->object_size);
                kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
        }

        memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
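
/*
 * Illustrative pairing (not part of the original header): an allocator's
 * allocation path is expected to bracket the actual object allocation with
 * these hooks, roughly as follows (local variable names are illustrative
 * only; the init argument typically comes from slab_want_init_on_alloc(),
 * defined further below):
 *
 *      struct obj_cgroup *objcg = NULL;
 *
 *      s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 *      if (!s)
 *              return NULL;    (fault injection or memcg charge failed)
 *      object = ...allocate from the per-cpu/per-node structures...;
 *      slab_post_alloc_hook(s, objcg, gfpflags, 1, &object,
 *                           slab_want_init_on_alloc(gfpflags, s));
 */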

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
        spinlock_t list_lock;

#ifdef CONFIG_SLAB
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long total_slabs;      /* length of all slab lists */
        unsigned long free_slabs;       /* length of free slab list only */
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct alien_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
        for (__node = 0; __node < nr_node_ids; __node++) \
                 if ((__n = get_node(__s, __node)))
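
/*
 * Illustrative usage (not part of the original header), assuming a caller
 * that wants to visit every per-node structure of a cache:
 *
 *      int node;
 *      struct kmem_cache_node *n;
 *
 *      for_each_kmem_cache_node(s, node, n)
 *              pr_info("node %d: %lu partial slabs\n", node, n->nr_partial);
 *
 * (nr_partial is only present in the SLUB variant of kmem_cache_node above.)
 */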

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                        gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                        unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
                                &init_on_alloc)) {
                if (c->ctor)
                        return false;
                if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
                        return flags & __GFP_ZERO;
                return true;
        }
        return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
                                &init_on_free))
                return !(c->ctor ||
                         (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
        return false;
}
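
/*
 * Illustrative sketch (not part of the original header): on the free path an
 * allocator would typically consult slab_want_init_on_free() before the
 * object goes back on the freelist, roughly:
 *
 *      if (slab_want_init_on_free(s))
 *              memset(object, 0, s->object_size);
 *
 * The exact range that is zeroed is allocator-specific; any metadata stored
 * inside the object (e.g. the freelist pointer) must be preserved.
 */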

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
        void *kp_ptr;
        struct page *kp_page;
        void *kp_objp;
        unsigned long kp_data_offset;
        struct kmem_cache *kp_slab_cache;
        void *kp_ret;
        void *kp_stack[KS_ADDRS_COUNT];
        void *kp_free_stack[KS_ADDRS_COUNT];
};
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
#endif

#endif /* MM_SLAB_H */