linux/mm/slub.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * SLUB: A slab allocator that limits cache line use instead of queuing
   4 * objects in per cpu and per node lists.
   5 *
   6 * The allocator synchronizes using per slab locks or atomic operatios
   7 * and only uses a centralized lock to manage a pool of partial slabs.
   8 *
   9 * (C) 2007 SGI, Christoph Lameter
  10 * (C) 2011 Linux Foundation, Christoph Lameter
  11 */
  12
  13#include <linux/mm.h>
  14#include <linux/swap.h> /* struct reclaim_state */
  15#include <linux/module.h>
  16#include <linux/bit_spinlock.h>
  17#include <linux/interrupt.h>
  18#include <linux/bitops.h>
  19#include <linux/slab.h>
  20#include "slab.h"
  21#include <linux/proc_fs.h>
  22#include <linux/notifier.h>
  23#include <linux/seq_file.h>
  24#include <linux/kasan.h>
  25#include <linux/cpu.h>
  26#include <linux/cpuset.h>
  27#include <linux/mempolicy.h>
  28#include <linux/ctype.h>
  29#include <linux/debugobjects.h>
  30#include <linux/kallsyms.h>
  31#include <linux/memory.h>
  32#include <linux/math64.h>
  33#include <linux/fault-inject.h>
  34#include <linux/stacktrace.h>
  35#include <linux/prefetch.h>
  36#include <linux/memcontrol.h>
  37#include <linux/random.h>
  38
  39#include <trace/events/kmem.h>
  40
  41#include "internal.h"
  42
  43/*
  44 * Lock order:
  45 *   1. slab_mutex (Global Mutex)
  46 *   2. node->list_lock
  47 *   3. slab_lock(page) (Only on some arches and for debugging)
  48 *
  49 *   slab_mutex
  50 *
  51 *   The role of the slab_mutex is to protect the list of all the slabs
  52 *   and to synchronize major metadata changes to slab cache structures.
  53 *
  54 *   The slab_lock is only used for debugging and on arches that do not
  55 *   have the ability to do a cmpxchg_double. It only protects the second
  56 *   double word in the page struct. Meaning
  57 *      A. page->freelist       -> List of object free in a page
  58 *      B. page->counters       -> Counters of objects
  59 *      C. page->frozen         -> frozen state
  60 *
  61 *   If a slab is frozen then it is exempt from list management. It is not
  62 *   on any list. The processor that froze the slab is the one who can
  63 *   perform list operations on the page. Other processors may put objects
  64 *   onto the freelist but the processor that froze the slab is the only
  65 *   one that can retrieve the objects from the page's freelist.
  66 *
  67 *   The list_lock protects the partial and full list on each node and
  68 *   the partial slab counter. If taken then no new slabs may be added or
  69 *   removed from the lists nor make the number of partial slabs be modified.
  70 *   (Note that the total number of slabs is an atomic value that may be
  71 *   modified without taking the list lock).
  72 *
  73 *   The list_lock is a centralized lock and thus we avoid taking it as
  74 *   much as possible. As long as SLUB does not have to handle partial
  75 *   slabs, operations can continue without any centralized lock. F.e.
  76 *   allocating a long series of objects that fill up slabs does not require
  77 *   the list lock.
  78 *   Interrupts are disabled during allocation and deallocation in order to
  79 *   make the slab allocator safe to use in the context of an irq. In addition
  80 *   interrupts are disabled to ensure that the processor does not change
  81 *   while handling per_cpu slabs, due to kernel preemption.
  82 *
  83 * SLUB assigns one slab for allocation to each processor.
  84 * Allocations only occur from these slabs called cpu slabs.
  85 *
  86 * Slabs with free elements are kept on a partial list and during regular
  87 * operations no list for full slabs is used. If an object in a full slab is
  88 * freed then the slab will show up again on the partial lists.
  89 * We track full slabs for debugging purposes though because otherwise we
  90 * cannot scan all objects.
  91 *
  92 * Slabs are freed when they become empty. Teardown and setup is
  93 * minimal so we rely on the page allocators per cpu caches for
  94 * fast frees and allocs.
  95 *
  96 * Overloading of page flags that are otherwise used for LRU management.
  97 *
  98 * PageActive           The slab is frozen and exempt from list processing.
  99 *                      This means that the slab is dedicated to a purpose
 100 *                      such as satisfying allocations for a specific
 101 *                      processor. Objects may be freed in the slab while
 102 *                      it is frozen but slab_free will then skip the usual
 103 *                      list operations. It is up to the processor holding
 104 *                      the slab to integrate the slab into the slab lists
 105 *                      when the slab is no longer needed.
 106 *
 107 *                      One use of this flag is to mark slabs that are
 108 *                      used for allocations. Then such a slab becomes a cpu
 109 *                      slab. The cpu slab may be equipped with an additional
 110 *                      freelist that allows lockless access to
 111 *                      free objects in addition to the regular freelist
 112 *                      that requires the slab lock.
 113 *
 114 * PageError            Slab requires special handling due to debug
 115 *                      options set. This moves slab handling out of
 116 *                      the fast path and disables lockless freelists.
 117 */
 118
 119static inline int kmem_cache_debug(struct kmem_cache *s)
 120{
 121#ifdef CONFIG_SLUB_DEBUG
 122        return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 123#else
 124        return 0;
 125#endif
 126}
 127
 128void *fixup_red_left(struct kmem_cache *s, void *p)
 129{
 130        if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
 131                p += s->red_left_pad;
 132
 133        return p;
 134}
 135
 136static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 137{
 138#ifdef CONFIG_SLUB_CPU_PARTIAL
 139        return !kmem_cache_debug(s);
 140#else
 141        return false;
 142#endif
 143}
 144
 145/*
 146 * Issues still to be resolved:
 147 *
 148 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 149 *
 150 * - Variable sizing of the per node arrays
 151 */
 152
 153/* Enable to test recovery from slab corruption on boot */
 154#undef SLUB_RESILIENCY_TEST
 155
 156/* Enable to log cmpxchg failures */
 157#undef SLUB_DEBUG_CMPXCHG
 158
 159/*
 160 * Mininum number of partial slabs. These will be left on the partial
 161 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 162 */
 163#define MIN_PARTIAL 5
 164
 165/*
 166 * Maximum number of desirable partial slabs.
 167 * The existence of more partial slabs makes kmem_cache_shrink
 168 * sort the partial list by the number of objects in use.
 169 */
 170#define MAX_PARTIAL 10
 171
 172#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
 173                                SLAB_POISON | SLAB_STORE_USER)
 174
 175/*
 176 * These debug flags cannot use CMPXCHG because there might be consistency
 177 * issues when checking or reading debug information
 178 */
 179#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
 180                                SLAB_TRACE)
 181
 182
 183/*
 184 * Debugging flags that require metadata to be stored in the slab.  These get
 185 * disabled when slub_debug=O is used and a cache's min order increases with
 186 * metadata.
 187 */
 188#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 189
 190#define OO_SHIFT        16
 191#define OO_MASK         ((1 << OO_SHIFT) - 1)
 192#define MAX_OBJS_PER_PAGE       32767 /* since page.objects is u15 */
 193
 194/* Internal SLUB flags */
 195/* Poison object */
 196#define __OBJECT_POISON         ((slab_flags_t __force)0x80000000U)
 197/* Use cmpxchg_double */
 198#define __CMPXCHG_DOUBLE        ((slab_flags_t __force)0x40000000U)
 199
 200/*
 201 * Tracking user of a slab.
 202 */
 203#define TRACK_ADDRS_COUNT 16
 204struct track {
 205        unsigned long addr;     /* Called from address */
 206#ifdef CONFIG_STACKTRACE
 207        unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
 208#endif
 209        int cpu;                /* Was running on cpu */
 210        int pid;                /* Pid context */
 211        unsigned long when;     /* When did the operation occur */
 212};
 213
 214enum track_item { TRACK_ALLOC, TRACK_FREE };
 215
 216#ifdef CONFIG_SYSFS
 217static int sysfs_slab_add(struct kmem_cache *);
 218static int sysfs_slab_alias(struct kmem_cache *, const char *);
 219static void memcg_propagate_slab_attrs(struct kmem_cache *s);
 220static void sysfs_slab_remove(struct kmem_cache *s);
 221#else
 222static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 223static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 224                                                        { return 0; }
 225static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
 226static inline void sysfs_slab_remove(struct kmem_cache *s) { }
 227#endif
 228
 229static inline void stat(const struct kmem_cache *s, enum stat_item si)
 230{
 231#ifdef CONFIG_SLUB_STATS
 232        /*
 233         * The rmw is racy on a preemptible kernel but this is acceptable, so
 234         * avoid this_cpu_add()'s irq-disable overhead.
 235         */
 236        raw_cpu_inc(s->cpu_slab->stat[si]);
 237#endif
 238}
 239
 240/********************************************************************
 241 *                      Core slab cache functions
 242 *******************************************************************/
 243
 244/*
 245 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 246 * with an XOR of the address where the pointer is held and a per-cache
 247 * random number.
 248 */
 249static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
 250                                 unsigned long ptr_addr)
 251{
 252#ifdef CONFIG_SLAB_FREELIST_HARDENED
 253        return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
 254#else
 255        return ptr;
 256#endif
 257}
 258
 259/* Returns the freelist pointer recorded at location ptr_addr. */
 260static inline void *freelist_dereference(const struct kmem_cache *s,
 261                                         void *ptr_addr)
 262{
 263        return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
 264                            (unsigned long)ptr_addr);
 265}
 266
 267static inline void *get_freepointer(struct kmem_cache *s, void *object)
 268{
 269        return freelist_dereference(s, object + s->offset);
 270}
 271
 272static void prefetch_freepointer(const struct kmem_cache *s, void *object)
 273{
 274        if (object)
 275                prefetch(freelist_dereference(s, object + s->offset));
 276}
 277
 278static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 279{
 280        unsigned long freepointer_addr;
 281        void *p;
 282
 283        if (!debug_pagealloc_enabled())
 284                return get_freepointer(s, object);
 285
 286        freepointer_addr = (unsigned long)object + s->offset;
 287        probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
 288        return freelist_ptr(s, p, freepointer_addr);
 289}
 290
 291static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 292{
 293        unsigned long freeptr_addr = (unsigned long)object + s->offset;
 294
 295#ifdef CONFIG_SLAB_FREELIST_HARDENED
 296        BUG_ON(object == fp); /* naive detection of double free or corruption */
 297#endif
 298
 299        *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
 300}
 301
 302/* Loop over all objects in a slab */
 303#define for_each_object(__p, __s, __addr, __objects) \
 304        for (__p = fixup_red_left(__s, __addr); \
 305                __p < (__addr) + (__objects) * (__s)->size; \
 306                __p += (__s)->size)
 307
 308#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
 309        for (__p = fixup_red_left(__s, __addr), __idx = 1; \
 310                __idx <= __objects; \
 311                __p += (__s)->size, __idx++)
 312
 313/* Determine object index from a given position */
 314static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
 315{
 316        return (p - addr) / s->size;
 317}
 318
 319static inline unsigned int order_objects(unsigned int order, unsigned int size, unsigned int reserved)
 320{
 321        return (((unsigned int)PAGE_SIZE << order) - reserved) / size;
 322}
 323
 324static inline struct kmem_cache_order_objects oo_make(unsigned int order,
 325                unsigned int size, unsigned int reserved)
 326{
 327        struct kmem_cache_order_objects x = {
 328                (order << OO_SHIFT) + order_objects(order, size, reserved)
 329        };
 330
 331        return x;
 332}
 333
 334static inline unsigned int oo_order(struct kmem_cache_order_objects x)
 335{
 336        return x.x >> OO_SHIFT;
 337}
 338
 339static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 340{
 341        return x.x & OO_MASK;
 342}
 343
 344/*
 345 * Per slab locking using the pagelock
 346 */
 347static __always_inline void slab_lock(struct page *page)
 348{
 349        VM_BUG_ON_PAGE(PageTail(page), page);
 350        bit_spin_lock(PG_locked, &page->flags);
 351}
 352
 353static __always_inline void slab_unlock(struct page *page)
 354{
 355        VM_BUG_ON_PAGE(PageTail(page), page);
 356        __bit_spin_unlock(PG_locked, &page->flags);
 357}
 358
 359static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
 360{
 361        struct page tmp;
 362        tmp.counters = counters_new;
 363        /*
 364         * page->counters can cover frozen/inuse/objects as well
 365         * as page->_refcount.  If we assign to ->counters directly
 366         * we run the risk of losing updates to page->_refcount, so
 367         * be careful and only assign to the fields we need.
 368         */
 369        page->frozen  = tmp.frozen;
 370        page->inuse   = tmp.inuse;
 371        page->objects = tmp.objects;
 372}
 373
 374/* Interrupts must be disabled (for the fallback code to work right) */
 375static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 376                void *freelist_old, unsigned long counters_old,
 377                void *freelist_new, unsigned long counters_new,
 378                const char *n)
 379{
 380        VM_BUG_ON(!irqs_disabled());
 381#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
 382    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 383        if (s->flags & __CMPXCHG_DOUBLE) {
 384                if (cmpxchg_double(&page->freelist, &page->counters,
 385                                   freelist_old, counters_old,
 386                                   freelist_new, counters_new))
 387                        return true;
 388        } else
 389#endif
 390        {
 391                slab_lock(page);
 392                if (page->freelist == freelist_old &&
 393                                        page->counters == counters_old) {
 394                        page->freelist = freelist_new;
 395                        set_page_slub_counters(page, counters_new);
 396                        slab_unlock(page);
 397                        return true;
 398                }
 399                slab_unlock(page);
 400        }
 401
 402        cpu_relax();
 403        stat(s, CMPXCHG_DOUBLE_FAIL);
 404
 405#ifdef SLUB_DEBUG_CMPXCHG
 406        pr_info("%s %s: cmpxchg double redo ", n, s->name);
 407#endif
 408
 409        return false;
 410}
 411
 412static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 413                void *freelist_old, unsigned long counters_old,
 414                void *freelist_new, unsigned long counters_new,
 415                const char *n)
 416{
 417#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
 418    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 419        if (s->flags & __CMPXCHG_DOUBLE) {
 420                if (cmpxchg_double(&page->freelist, &page->counters,
 421                                   freelist_old, counters_old,
 422                                   freelist_new, counters_new))
 423                        return true;
 424        } else
 425#endif
 426        {
 427                unsigned long flags;
 428
 429                local_irq_save(flags);
 430                slab_lock(page);
 431                if (page->freelist == freelist_old &&
 432                                        page->counters == counters_old) {
 433                        page->freelist = freelist_new;
 434                        set_page_slub_counters(page, counters_new);
 435                        slab_unlock(page);
 436                        local_irq_restore(flags);
 437                        return true;
 438                }
 439                slab_unlock(page);
 440                local_irq_restore(flags);
 441        }
 442
 443        cpu_relax();
 444        stat(s, CMPXCHG_DOUBLE_FAIL);
 445
 446#ifdef SLUB_DEBUG_CMPXCHG
 447        pr_info("%s %s: cmpxchg double redo ", n, s->name);
 448#endif
 449
 450        return false;
 451}
 452
 453#ifdef CONFIG_SLUB_DEBUG
 454/*
 455 * Determine a map of object in use on a page.
 456 *
 457 * Node listlock must be held to guarantee that the page does
 458 * not vanish from under us.
 459 */
 460static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
 461{
 462        void *p;
 463        void *addr = page_address(page);
 464
 465        for (p = page->freelist; p; p = get_freepointer(s, p))
 466                set_bit(slab_index(p, s, addr), map);
 467}
 468
 469static inline unsigned int size_from_object(struct kmem_cache *s)
 470{
 471        if (s->flags & SLAB_RED_ZONE)
 472                return s->size - s->red_left_pad;
 473
 474        return s->size;
 475}
 476
 477static inline void *restore_red_left(struct kmem_cache *s, void *p)
 478{
 479        if (s->flags & SLAB_RED_ZONE)
 480                p -= s->red_left_pad;
 481
 482        return p;
 483}
 484
 485/*
 486 * Debug settings:
 487 */
 488#if defined(CONFIG_SLUB_DEBUG_ON)
 489static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
 490#else
 491static slab_flags_t slub_debug;
 492#endif
 493
 494static char *slub_debug_slabs;
 495static int disable_higher_order_debug;
 496
 497/*
 498 * slub is about to manipulate internal object metadata.  This memory lies
 499 * outside the range of the allocated object, so accessing it would normally
 500 * be reported by kasan as a bounds error.  metadata_access_enable() is used
 501 * to tell kasan that these accesses are OK.
 502 */
 503static inline void metadata_access_enable(void)
 504{
 505        kasan_disable_current();
 506}
 507
 508static inline void metadata_access_disable(void)
 509{
 510        kasan_enable_current();
 511}
 512
 513/*
 514 * Object debugging
 515 */
 516
 517/* Verify that a pointer has an address that is valid within a slab page */
 518static inline int check_valid_pointer(struct kmem_cache *s,
 519                                struct page *page, void *object)
 520{
 521        void *base;
 522
 523        if (!object)
 524                return 1;
 525
 526        base = page_address(page);
 527        object = restore_red_left(s, object);
 528        if (object < base || object >= base + page->objects * s->size ||
 529                (object - base) % s->size) {
 530                return 0;
 531        }
 532
 533        return 1;
 534}
 535
 536static void print_section(char *level, char *text, u8 *addr,
 537                          unsigned int length)
 538{
 539        metadata_access_enable();
 540        print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
 541                        length, 1);
 542        metadata_access_disable();
 543}
 544
 545static struct track *get_track(struct kmem_cache *s, void *object,
 546        enum track_item alloc)
 547{
 548        struct track *p;
 549
 550        if (s->offset)
 551                p = object + s->offset + sizeof(void *);
 552        else
 553                p = object + s->inuse;
 554
 555        return p + alloc;
 556}
 557
 558static void set_track(struct kmem_cache *s, void *object,
 559                        enum track_item alloc, unsigned long addr)
 560{
 561        struct track *p = get_track(s, object, alloc);
 562
 563        if (addr) {
 564#ifdef CONFIG_STACKTRACE
 565                struct stack_trace trace;
 566                int i;
 567
 568                trace.nr_entries = 0;
 569                trace.max_entries = TRACK_ADDRS_COUNT;
 570                trace.entries = p->addrs;
 571                trace.skip = 3;
 572                metadata_access_enable();
 573                save_stack_trace(&trace);
 574                metadata_access_disable();
 575
 576                /* See rant in lockdep.c */
 577                if (trace.nr_entries != 0 &&
 578                    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
 579                        trace.nr_entries--;
 580
 581                for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
 582                        p->addrs[i] = 0;
 583#endif
 584                p->addr = addr;
 585                p->cpu = smp_processor_id();
 586                p->pid = current->pid;
 587                p->when = jiffies;
 588        } else
 589                memset(p, 0, sizeof(struct track));
 590}
 591
 592static void init_tracking(struct kmem_cache *s, void *object)
 593{
 594        if (!(s->flags & SLAB_STORE_USER))
 595                return;
 596
 597        set_track(s, object, TRACK_FREE, 0UL);
 598        set_track(s, object, TRACK_ALLOC, 0UL);
 599}
 600
 601static void print_track(const char *s, struct track *t, unsigned long pr_time)
 602{
 603        if (!t->addr)
 604                return;
 605
 606        pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
 607               s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
 608#ifdef CONFIG_STACKTRACE
 609        {
 610                int i;
 611                for (i = 0; i < TRACK_ADDRS_COUNT; i++)
 612                        if (t->addrs[i])
 613                                pr_err("\t%pS\n", (void *)t->addrs[i]);
 614                        else
 615                                break;
 616        }
 617#endif
 618}
 619
 620static void print_tracking(struct kmem_cache *s, void *object)
 621{
 622        unsigned long pr_time = jiffies;
 623        if (!(s->flags & SLAB_STORE_USER))
 624                return;
 625
 626        print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
 627        print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
 628}
 629
 630static void print_page_info(struct page *page)
 631{
 632        pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
 633               page, page->objects, page->inuse, page->freelist, page->flags);
 634
 635}
 636
 637static void slab_bug(struct kmem_cache *s, char *fmt, ...)
 638{
 639        struct va_format vaf;
 640        va_list args;
 641
 642        va_start(args, fmt);
 643        vaf.fmt = fmt;
 644        vaf.va = &args;
 645        pr_err("=============================================================================\n");
 646        pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
 647        pr_err("-----------------------------------------------------------------------------\n\n");
 648
 649        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 650        va_end(args);
 651}
 652
 653static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 654{
 655        struct va_format vaf;
 656        va_list args;
 657
 658        va_start(args, fmt);
 659        vaf.fmt = fmt;
 660        vaf.va = &args;
 661        pr_err("FIX %s: %pV\n", s->name, &vaf);
 662        va_end(args);
 663}
 664
 665static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 666{
 667        unsigned int off;       /* Offset of last byte */
 668        u8 *addr = page_address(page);
 669
 670        print_tracking(s, p);
 671
 672        print_page_info(page);
 673
 674        pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
 675               p, p - addr, get_freepointer(s, p));
 676
 677        if (s->flags & SLAB_RED_ZONE)
 678                print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
 679                              s->red_left_pad);
 680        else if (p > addr + 16)
 681                print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 682
 683        print_section(KERN_ERR, "Object ", p,
 684                      min_t(unsigned int, s->object_size, PAGE_SIZE));
 685        if (s->flags & SLAB_RED_ZONE)
 686                print_section(KERN_ERR, "Redzone ", p + s->object_size,
 687                        s->inuse - s->object_size);
 688
 689        if (s->offset)
 690                off = s->offset + sizeof(void *);
 691        else
 692                off = s->inuse;
 693
 694        if (s->flags & SLAB_STORE_USER)
 695                off += 2 * sizeof(struct track);
 696
 697        off += kasan_metadata_size(s);
 698
 699        if (off != size_from_object(s))
 700                /* Beginning of the filler is the free pointer */
 701                print_section(KERN_ERR, "Padding ", p + off,
 702                              size_from_object(s) - off);
 703
 704        dump_stack();
 705}
 706
 707void object_err(struct kmem_cache *s, struct page *page,
 708                        u8 *object, char *reason)
 709{
 710        slab_bug(s, "%s", reason);
 711        print_trailer(s, page, object);
 712}
 713
 714static void slab_err(struct kmem_cache *s, struct page *page,
 715                        const char *fmt, ...)
 716{
 717        va_list args;
 718        char buf[100];
 719
 720        va_start(args, fmt);
 721        vsnprintf(buf, sizeof(buf), fmt, args);
 722        va_end(args);
 723        slab_bug(s, "%s", buf);
 724        print_page_info(page);
 725        dump_stack();
 726}
 727
 728static void init_object(struct kmem_cache *s, void *object, u8 val)
 729{
 730        u8 *p = object;
 731
 732        if (s->flags & SLAB_RED_ZONE)
 733                memset(p - s->red_left_pad, val, s->red_left_pad);
 734
 735        if (s->flags & __OBJECT_POISON) {
 736                memset(p, POISON_FREE, s->object_size - 1);
 737                p[s->object_size - 1] = POISON_END;
 738        }
 739
 740        if (s->flags & SLAB_RED_ZONE)
 741                memset(p + s->object_size, val, s->inuse - s->object_size);
 742}
 743
 744static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 745                                                void *from, void *to)
 746{
 747        slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
 748        memset(from, data, to - from);
 749}
 750
 751static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 752                        u8 *object, char *what,
 753                        u8 *start, unsigned int value, unsigned int bytes)
 754{
 755        u8 *fault;
 756        u8 *end;
 757
 758        metadata_access_enable();
 759        fault = memchr_inv(start, value, bytes);
 760        metadata_access_disable();
 761        if (!fault)
 762                return 1;
 763
 764        end = start + bytes;
 765        while (end > fault && end[-1] == value)
 766                end--;
 767
 768        slab_bug(s, "%s overwritten", what);
 769        pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
 770                                        fault, end - 1, fault[0], value);
 771        print_trailer(s, page, object);
 772
 773        restore_bytes(s, what, value, fault, end);
 774        return 0;
 775}
 776
 777/*
 778 * Object layout:
 779 *
 780 * object address
 781 *      Bytes of the object to be managed.
 782 *      If the freepointer may overlay the object then the free
 783 *      pointer is the first word of the object.
 784 *
 785 *      Poisoning uses 0x6b (POISON_FREE) and the last byte is
 786 *      0xa5 (POISON_END)
 787 *
 788 * object + s->object_size
 789 *      Padding to reach word boundary. This is also used for Redzoning.
 790 *      Padding is extended by another word if Redzoning is enabled and
 791 *      object_size == inuse.
 792 *
 793 *      We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 794 *      0xcc (RED_ACTIVE) for objects in use.
 795 *
 796 * object + s->inuse
 797 *      Meta data starts here.
 798 *
 799 *      A. Free pointer (if we cannot overwrite object on free)
 800 *      B. Tracking data for SLAB_STORE_USER
 801 *      C. Padding to reach required alignment boundary or at mininum
 802 *              one word if debugging is on to be able to detect writes
 803 *              before the word boundary.
 804 *
 805 *      Padding is done using 0x5a (POISON_INUSE)
 806 *
 807 * object + s->size
 808 *      Nothing is used beyond s->size.
 809 *
 810 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 811 * ignored. And therefore no slab options that rely on these boundaries
 812 * may be used with merged slabcaches.
 813 */
 814
 815static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 816{
 817        unsigned long off = s->inuse;   /* The end of info */
 818
 819        if (s->offset)
 820                /* Freepointer is placed after the object. */
 821                off += sizeof(void *);
 822
 823        if (s->flags & SLAB_STORE_USER)
 824                /* We also have user information there */
 825                off += 2 * sizeof(struct track);
 826
 827        off += kasan_metadata_size(s);
 828
 829        if (size_from_object(s) == off)
 830                return 1;
 831
 832        return check_bytes_and_report(s, page, p, "Object padding",
 833                        p + off, POISON_INUSE, size_from_object(s) - off);
 834}
 835
 836/* Check the pad bytes at the end of a slab page */
 837static int slab_pad_check(struct kmem_cache *s, struct page *page)
 838{
 839        u8 *start;
 840        u8 *fault;
 841        u8 *end;
 842        u8 *pad;
 843        int length;
 844        int remainder;
 845
 846        if (!(s->flags & SLAB_POISON))
 847                return 1;
 848
 849        start = page_address(page);
 850        length = (PAGE_SIZE << compound_order(page)) - s->reserved;
 851        end = start + length;
 852        remainder = length % s->size;
 853        if (!remainder)
 854                return 1;
 855
 856        pad = end - remainder;
 857        metadata_access_enable();
 858        fault = memchr_inv(pad, POISON_INUSE, remainder);
 859        metadata_access_disable();
 860        if (!fault)
 861                return 1;
 862        while (end > fault && end[-1] == POISON_INUSE)
 863                end--;
 864
 865        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
 866        print_section(KERN_ERR, "Padding ", pad, remainder);
 867
 868        restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
 869        return 0;
 870}
 871
 872static int check_object(struct kmem_cache *s, struct page *page,
 873                                        void *object, u8 val)
 874{
 875        u8 *p = object;
 876        u8 *endobject = object + s->object_size;
 877
 878        if (s->flags & SLAB_RED_ZONE) {
 879                if (!check_bytes_and_report(s, page, object, "Redzone",
 880                        object - s->red_left_pad, val, s->red_left_pad))
 881                        return 0;
 882
 883                if (!check_bytes_and_report(s, page, object, "Redzone",
 884                        endobject, val, s->inuse - s->object_size))
 885                        return 0;
 886        } else {
 887                if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 888                        check_bytes_and_report(s, page, p, "Alignment padding",
 889                                endobject, POISON_INUSE,
 890                                s->inuse - s->object_size);
 891                }
 892        }
 893
 894        if (s->flags & SLAB_POISON) {
 895                if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
 896                        (!check_bytes_and_report(s, page, p, "Poison", p,
 897                                        POISON_FREE, s->object_size - 1) ||
 898                         !check_bytes_and_report(s, page, p, "Poison",
 899                                p + s->object_size - 1, POISON_END, 1)))
 900                        return 0;
 901                /*
 902                 * check_pad_bytes cleans up on its own.
 903                 */
 904                check_pad_bytes(s, page, p);
 905        }
 906
 907        if (!s->offset && val == SLUB_RED_ACTIVE)
 908                /*
 909                 * Object and freepointer overlap. Cannot check
 910                 * freepointer while object is allocated.
 911                 */
 912                return 1;
 913
 914        /* Check free pointer validity */
 915        if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
 916                object_err(s, page, p, "Freepointer corrupt");
 917                /*
 918                 * No choice but to zap it and thus lose the remainder
 919                 * of the free objects in this slab. May cause
 920                 * another error because the object count is now wrong.
 921                 */
 922                set_freepointer(s, p, NULL);
 923                return 0;
 924        }
 925        return 1;
 926}
 927
 928static int check_slab(struct kmem_cache *s, struct page *page)
 929{
 930        int maxobj;
 931
 932        VM_BUG_ON(!irqs_disabled());
 933
 934        if (!PageSlab(page)) {
 935                slab_err(s, page, "Not a valid slab page");
 936                return 0;
 937        }
 938
 939        maxobj = order_objects(compound_order(page), s->size, s->reserved);
 940        if (page->objects > maxobj) {
 941                slab_err(s, page, "objects %u > max %u",
 942                        page->objects, maxobj);
 943                return 0;
 944        }
 945        if (page->inuse > page->objects) {
 946                slab_err(s, page, "inuse %u > max %u",
 947                        page->inuse, page->objects);
 948                return 0;
 949        }
 950        /* Slab_pad_check fixes things up after itself */
 951        slab_pad_check(s, page);
 952        return 1;
 953}
 954
 955/*
 956 * Determine if a certain object on a page is on the freelist. Must hold the
 957 * slab lock to guarantee that the chains are in a consistent state.
 958 */
 959static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 960{
 961        int nr = 0;
 962        void *fp;
 963        void *object = NULL;
 964        int max_objects;
 965
 966        fp = page->freelist;
 967        while (fp && nr <= page->objects) {
 968                if (fp == search)
 969                        return 1;
 970                if (!check_valid_pointer(s, page, fp)) {
 971                        if (object) {
 972                                object_err(s, page, object,
 973                                        "Freechain corrupt");
 974                                set_freepointer(s, object, NULL);
 975                        } else {
 976                                slab_err(s, page, "Freepointer corrupt");
 977                                page->freelist = NULL;
 978                                page->inuse = page->objects;
 979                                slab_fix(s, "Freelist cleared");
 980                                return 0;
 981                        }
 982                        break;
 983                }
 984                object = fp;
 985                fp = get_freepointer(s, object);
 986                nr++;
 987        }
 988
 989        max_objects = order_objects(compound_order(page), s->size, s->reserved);
 990        if (max_objects > MAX_OBJS_PER_PAGE)
 991                max_objects = MAX_OBJS_PER_PAGE;
 992
 993        if (page->objects != max_objects) {
 994                slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
 995                         page->objects, max_objects);
 996                page->objects = max_objects;
 997                slab_fix(s, "Number of objects adjusted.");
 998        }
 999        if (page->inuse != page->objects - nr) {
1000                slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
1001                         page->inuse, page->objects - nr);
1002                page->inuse = page->objects - nr;
1003                slab_fix(s, "Object count adjusted.");
1004        }
1005        return search == NULL;
1006}
1007
1008static void trace(struct kmem_cache *s, struct page *page, void *object,
1009                                                                int alloc)
1010{
1011        if (s->flags & SLAB_TRACE) {
1012                pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1013                        s->name,
1014                        alloc ? "alloc" : "free",
1015                        object, page->inuse,
1016                        page->freelist);
1017
1018                if (!alloc)
1019                        print_section(KERN_INFO, "Object ", (void *)object,
1020                                        s->object_size);
1021
1022                dump_stack();
1023        }
1024}
1025
1026/*
1027 * Tracking of fully allocated slabs for debugging purposes.
1028 */
1029static void add_full(struct kmem_cache *s,
1030        struct kmem_cache_node *n, struct page *page)
1031{
1032        if (!(s->flags & SLAB_STORE_USER))
1033                return;
1034
1035        lockdep_assert_held(&n->list_lock);
1036        list_add(&page->lru, &n->full);
1037}
1038
1039static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1040{
1041        if (!(s->flags & SLAB_STORE_USER))
1042                return;
1043
1044        lockdep_assert_held(&n->list_lock);
1045        list_del(&page->lru);
1046}
1047
1048/* Tracking of the number of slabs for debugging purposes */
1049static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1050{
1051        struct kmem_cache_node *n = get_node(s, node);
1052
1053        return atomic_long_read(&n->nr_slabs);
1054}
1055
1056static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1057{
1058        return atomic_long_read(&n->nr_slabs);
1059}
1060
1061static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1062{
1063        struct kmem_cache_node *n = get_node(s, node);
1064
1065        /*
1066         * May be called early in order to allocate a slab for the
1067         * kmem_cache_node structure. Solve the chicken-egg
1068         * dilemma by deferring the increment of the count during
1069         * bootstrap (see early_kmem_cache_node_alloc).
1070         */
1071        if (likely(n)) {
1072                atomic_long_inc(&n->nr_slabs);
1073                atomic_long_add(objects, &n->total_objects);
1074        }
1075}
1076static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1077{
1078        struct kmem_cache_node *n = get_node(s, node);
1079
1080        atomic_long_dec(&n->nr_slabs);
1081        atomic_long_sub(objects, &n->total_objects);
1082}
1083
1084/* Object debug checks for alloc/free paths */
1085static void setup_object_debug(struct kmem_cache *s, struct page *page,
1086                                                                void *object)
1087{
1088        if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1089                return;
1090
1091        init_object(s, object, SLUB_RED_INACTIVE);
1092        init_tracking(s, object);
1093}
1094
1095static inline int alloc_consistency_checks(struct kmem_cache *s,
1096                                        struct page *page,
1097                                        void *object, unsigned long addr)
1098{
1099        if (!check_slab(s, page))
1100                return 0;
1101
1102        if (!check_valid_pointer(s, page, object)) {
1103                object_err(s, page, object, "Freelist Pointer check fails");
1104                return 0;
1105        }
1106
1107        if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1108                return 0;
1109
1110        return 1;
1111}
1112
1113static noinline int alloc_debug_processing(struct kmem_cache *s,
1114                                        struct page *page,
1115                                        void *object, unsigned long addr)
1116{
1117        if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1118                if (!alloc_consistency_checks(s, page, object, addr))
1119                        goto bad;
1120        }
1121
1122        /* Success perform special debug activities for allocs */
1123        if (s->flags & SLAB_STORE_USER)
1124                set_track(s, object, TRACK_ALLOC, addr);
1125        trace(s, page, object, 1);
1126        init_object(s, object, SLUB_RED_ACTIVE);
1127        return 1;
1128
1129bad:
1130        if (PageSlab(page)) {
1131                /*
1132                 * If this is a slab page then lets do the best we can
1133                 * to avoid issues in the future. Marking all objects
1134                 * as used avoids touching the remaining objects.
1135                 */
1136                slab_fix(s, "Marking all objects used");
1137                page->inuse = page->objects;
1138                page->freelist = NULL;
1139        }
1140        return 0;
1141}
1142
1143static inline int free_consistency_checks(struct kmem_cache *s,
1144                struct page *page, void *object, unsigned long addr)
1145{
1146        if (!check_valid_pointer(s, page, object)) {
1147                slab_err(s, page, "Invalid object pointer 0x%p", object);
1148                return 0;
1149        }
1150
1151        if (on_freelist(s, page, object)) {
1152                object_err(s, page, object, "Object already free");
1153                return 0;
1154        }
1155
1156        if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1157                return 0;
1158
1159        if (unlikely(s != page->slab_cache)) {
1160                if (!PageSlab(page)) {
1161                        slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1162                                 object);
1163                } else if (!page->slab_cache) {
1164                        pr_err("SLUB <none>: no slab for object 0x%p.\n",
1165                               object);
1166                        dump_stack();
1167                } else
1168                        object_err(s, page, object,
1169                                        "page slab pointer corrupt.");
1170                return 0;
1171        }
1172        return 1;
1173}
1174
1175/* Supports checking bulk free of a constructed freelist */
1176static noinline int free_debug_processing(
1177        struct kmem_cache *s, struct page *page,
1178        void *head, void *tail, int bulk_cnt,
1179        unsigned long addr)
1180{
1181        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1182        void *object = head;
1183        int cnt = 0;
1184        unsigned long uninitialized_var(flags);
1185        int ret = 0;
1186
1187        spin_lock_irqsave(&n->list_lock, flags);
1188        slab_lock(page);
1189
1190        if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1191                if (!check_slab(s, page))
1192                        goto out;
1193        }
1194
1195next_object:
1196        cnt++;
1197
1198        if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1199                if (!free_consistency_checks(s, page, object, addr))
1200                        goto out;
1201        }
1202
1203        if (s->flags & SLAB_STORE_USER)
1204                set_track(s, object, TRACK_FREE, addr);
1205        trace(s, page, object, 0);
1206        /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1207        init_object(s, object, SLUB_RED_INACTIVE);
1208
1209        /* Reached end of constructed freelist yet? */
1210        if (object != tail) {
1211                object = get_freepointer(s, object);
1212                goto next_object;
1213        }
1214        ret = 1;
1215
1216out:
1217        if (cnt != bulk_cnt)
1218                slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1219                         bulk_cnt, cnt);
1220
1221        slab_unlock(page);
1222        spin_unlock_irqrestore(&n->list_lock, flags);
1223        if (!ret)
1224                slab_fix(s, "Object at 0x%p not freed", object);
1225        return ret;
1226}
1227
1228static int __init setup_slub_debug(char *str)
1229{
1230        slub_debug = DEBUG_DEFAULT_FLAGS;
1231        if (*str++ != '=' || !*str)
1232                /*
1233                 * No options specified. Switch on full debugging.
1234                 */
1235                goto out;
1236
1237        if (*str == ',')
1238                /*
1239                 * No options but restriction on slabs. This means full
1240                 * debugging for slabs matching a pattern.
1241                 */
1242                goto check_slabs;
1243
1244        slub_debug = 0;
1245        if (*str == '-')
1246                /*
1247                 * Switch off all debugging measures.
1248                 */
1249                goto out;
1250
1251        /*
1252         * Determine which debug features should be switched on
1253         */
1254        for (; *str && *str != ','; str++) {
1255                switch (tolower(*str)) {
1256                case 'f':
1257                        slub_debug |= SLAB_CONSISTENCY_CHECKS;
1258                        break;
1259                case 'z':
1260                        slub_debug |= SLAB_RED_ZONE;
1261                        break;
1262                case 'p':
1263                        slub_debug |= SLAB_POISON;
1264                        break;
1265                case 'u':
1266                        slub_debug |= SLAB_STORE_USER;
1267                        break;
1268                case 't':
1269                        slub_debug |= SLAB_TRACE;
1270                        break;
1271                case 'a':
1272                        slub_debug |= SLAB_FAILSLAB;
1273                        break;
1274                case 'o':
1275                        /*
1276                         * Avoid enabling debugging on caches if its minimum
1277                         * order would increase as a result.
1278                         */
1279                        disable_higher_order_debug = 1;
1280                        break;
1281                default:
1282                        pr_err("slub_debug option '%c' unknown. skipped\n",
1283                               *str);
1284                }
1285        }
1286
1287check_slabs:
1288        if (*str == ',')
1289                slub_debug_slabs = str + 1;
1290out:
1291        return 1;
1292}
1293
1294__setup("slub_debug", setup_slub_debug);
1295
1296slab_flags_t kmem_cache_flags(unsigned int object_size,
1297        slab_flags_t flags, const char *name,
1298        void (*ctor)(void *))
1299{
1300        /*
1301         * Enable debugging if selected on the kernel commandline.
1302         */
1303        if (slub_debug && (!slub_debug_slabs || (name &&
1304                !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
1305                flags |= slub_debug;
1306
1307        return flags;
1308}
1309#else /* !CONFIG_SLUB_DEBUG */
1310static inline void setup_object_debug(struct kmem_cache *s,
1311                        struct page *page, void *object) {}
1312
1313static inline int alloc_debug_processing(struct kmem_cache *s,
1314        struct page *page, void *object, unsigned long addr) { return 0; }
1315
1316static inline int free_debug_processing(
1317        struct kmem_cache *s, struct page *page,
1318        void *head, void *tail, int bulk_cnt,
1319        unsigned long addr) { return 0; }
1320
1321static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1322                        { return 1; }
1323static inline int check_object(struct kmem_cache *s, struct page *page,
1324                        void *object, u8 val) { return 1; }
1325static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1326                                        struct page *page) {}
1327static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1328                                        struct page *page) {}
1329slab_flags_t kmem_cache_flags(unsigned int object_size,
1330        slab_flags_t flags, const char *name,
1331        void (*ctor)(void *))
1332{
1333        return flags;
1334}
1335#define slub_debug 0
1336
1337#define disable_higher_order_debug 0
1338
1339static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1340                                                        { return 0; }
1341static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1342                                                        { return 0; }
1343static inline void inc_slabs_node(struct kmem_cache *s, int node,
1344                                                        int objects) {}
1345static inline void dec_slabs_node(struct kmem_cache *s, int node,
1346                                                        int objects) {}
1347
1348#endif /* CONFIG_SLUB_DEBUG */
1349
1350/*
1351 * Hooks for other subsystems that check memory allocations. In a typical
1352 * production configuration these hooks all should produce no code at all.
1353 */
1354static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1355{
1356        kmemleak_alloc(ptr, size, 1, flags);
1357        kasan_kmalloc_large(ptr, size, flags);
1358}
1359
1360static __always_inline void kfree_hook(void *x)
1361{
1362        kmemleak_free(x);
1363        kasan_kfree_large(x, _RET_IP_);
1364}
1365
1366static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
1367{
1368        kmemleak_free_recursive(x, s->flags);
1369
1370        /*
1371         * Trouble is that we may no longer disable interrupts in the fast path
1372         * So in order to make the debug calls that expect irqs to be
1373         * disabled we need to disable interrupts temporarily.
1374         */
1375#ifdef CONFIG_LOCKDEP
1376        {
1377                unsigned long flags;
1378
1379                local_irq_save(flags);
1380                debug_check_no_locks_freed(x, s->object_size);
1381                local_irq_restore(flags);
1382        }
1383#endif
1384        if (!(s->flags & SLAB_DEBUG_OBJECTS))
1385                debug_check_no_obj_freed(x, s->object_size);
1386
1387        /* KASAN might put x into memory quarantine, delaying its reuse */
1388        return kasan_slab_free(s, x, _RET_IP_);
1389}
1390
1391static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1392                                           void **head, void **tail)
1393{
1394/*
1395 * Compiler cannot detect this function can be removed if slab_free_hook()
1396 * evaluates to nothing.  Thus, catch all relevant config debug options here.
1397 */
1398#if defined(CONFIG_LOCKDEP)     ||              \
1399        defined(CONFIG_DEBUG_KMEMLEAK) ||       \
1400        defined(CONFIG_DEBUG_OBJECTS_FREE) ||   \
1401        defined(CONFIG_KASAN)
1402
1403        void *object;
1404        void *next = *head;
1405        void *old_tail = *tail ? *tail : *head;
1406
1407        /* Head and tail of the reconstructed freelist */
1408        *head = NULL;
1409        *tail = NULL;
1410
1411        do {
1412                object = next;
1413                next = get_freepointer(s, object);
1414                /* If object's reuse doesn't have to be delayed */
1415                if (!slab_free_hook(s, object)) {
1416                        /* Move object to the new freelist */
1417                        set_freepointer(s, object, *head);
1418                        *head = object;
1419                        if (!*tail)
1420                                *tail = object;
1421                }
1422        } while (object != old_tail);
1423
1424        if (*head == *tail)
1425                *tail = NULL;
1426
1427        return *head != NULL;
1428#else
1429        return true;
1430#endif
1431}
1432
1433static void setup_object(struct kmem_cache *s, struct page *page,
1434                                void *object)
1435{
1436        setup_object_debug(s, page, object);
1437        kasan_init_slab_obj(s, object);
1438        if (unlikely(s->ctor)) {
1439                kasan_unpoison_object_data(s, object);
1440                s->ctor(object);
1441                kasan_poison_object_data(s, object);
1442        }
1443}
1444
1445/*
1446 * Slab allocation and freeing
1447 */
1448static inline struct page *alloc_slab_page(struct kmem_cache *s,
1449                gfp_t flags, int node, struct kmem_cache_order_objects oo)
1450{
1451        struct page *page;
1452        unsigned int order = oo_order(oo);
1453
1454        if (node == NUMA_NO_NODE)
1455                page = alloc_pages(flags, order);
1456        else
1457                page = __alloc_pages_node(node, flags, order);
1458
1459        if (page && memcg_charge_slab(page, flags, order, s)) {
1460                __free_pages(page, order);
1461                page = NULL;
1462        }
1463
1464        return page;
1465}
1466
1467#ifdef CONFIG_SLAB_FREELIST_RANDOM
1468/* Pre-initialize the random sequence cache */
1469static int init_cache_random_seq(struct kmem_cache *s)
1470{
1471        unsigned int count = oo_objects(s->oo);
1472        int err;
1473
1474        /* Bailout if already initialised */
1475        if (s->random_seq)
1476                return 0;
1477
1478        err = cache_random_seq_create(s, count, GFP_KERNEL);
1479        if (err) {
1480                pr_err("SLUB: Unable to initialize free list for %s\n",
1481                        s->name);
1482                return err;
1483        }
1484
1485        /* Transform to an offset on the set of pages */
1486        if (s->random_seq) {
1487                unsigned int i;
1488
1489                for (i = 0; i < count; i++)
1490                        s->random_seq[i] *= s->size;
1491        }
1492        return 0;
1493}
1494
1495/* Initialize each random sequence freelist per cache */
1496static void __init init_freelist_randomization(void)
1497{
1498        struct kmem_cache *s;
1499
1500        mutex_lock(&slab_mutex);
1501
1502        list_for_each_entry(s, &slab_caches, list)
1503                init_cache_random_seq(s);
1504
1505        mutex_unlock(&slab_mutex);
1506}
1507
1508/* Get the next entry on the pre-computed freelist randomized */
1509static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1510                                unsigned long *pos, void *start,
1511                                unsigned long page_limit,
1512                                unsigned long freelist_count)
1513{
1514        unsigned int idx;
1515
1516        /*
1517         * If the target page allocation failed, the number of objects on the
1518         * page might be smaller than the usual size defined by the cache.
1519         */
1520        do {
1521                idx = s->random_seq[*pos];
1522                *pos += 1;
1523                if (*pos >= freelist_count)
1524                        *pos = 0;
1525        } while (unlikely(idx >= page_limit));
1526
1527        return (char *)start + idx;
1528}
1529
1530/* Shuffle the single linked freelist based on a random pre-computed sequence */
1531static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1532{
1533        void *start;
1534        void *cur;
1535        void *next;
1536        unsigned long idx, pos, page_limit, freelist_count;
1537
1538        if (page->objects < 2 || !s->random_seq)
1539                return false;
1540
1541        freelist_count = oo_objects(s->oo);
1542        pos = get_random_int() % freelist_count;
1543
1544        page_limit = page->objects * s->size;
1545        start = fixup_red_left(s, page_address(page));
1546
1547        /* First entry is used as the base of the freelist */
1548        cur = next_freelist_entry(s, page, &pos, start, page_limit,
1549                                freelist_count);
1550        page->freelist = cur;
1551
1552        for (idx = 1; idx < page->objects; idx++) {
1553                setup_object(s, page, cur);
1554                next = next_freelist_entry(s, page, &pos, start, page_limit,
1555                        freelist_count);
1556                set_freepointer(s, cur, next);
1557                cur = next;
1558        }
1559        setup_object(s, page, cur);
1560        set_freepointer(s, cur, NULL);
1561
1562        return true;
1563}
1564#else
1565static inline int init_cache_random_seq(struct kmem_cache *s)
1566{
1567        return 0;
1568}
1569static inline void init_freelist_randomization(void) { }
1570static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1571{
1572        return false;
1573}
1574#endif /* CONFIG_SLAB_FREELIST_RANDOM */
1575
1576static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1577{
1578        struct page *page;
1579        struct kmem_cache_order_objects oo = s->oo;
1580        gfp_t alloc_gfp;
1581        void *start, *p;
1582        int idx, order;
1583        bool shuffle;
1584
1585        flags &= gfp_allowed_mask;
1586
1587        if (gfpflags_allow_blocking(flags))
1588                local_irq_enable();
1589
1590        flags |= s->allocflags;
1591
1592        /*
1593         * Let the initial higher-order allocation fail under memory pressure
1594         * so we fall-back to the minimum order allocation.
1595         */
1596        alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1597        if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
1598                alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
1599
1600        page = alloc_slab_page(s, alloc_gfp, node, oo);
1601        if (unlikely(!page)) {
1602                oo = s->min;
1603                alloc_gfp = flags;
1604                /*
1605                 * Allocation may have failed due to fragmentation.
1606                 * Try a lower order alloc if possible
1607                 */
1608                page = alloc_slab_page(s, alloc_gfp, node, oo);
1609                if (unlikely(!page))
1610                        goto out;
1611                stat(s, ORDER_FALLBACK);
1612        }
1613
1614        page->objects = oo_objects(oo);
1615
1616        order = compound_order(page);
1617        page->slab_cache = s;
1618        __SetPageSlab(page);
1619        if (page_is_pfmemalloc(page))
1620                SetPageSlabPfmemalloc(page);
1621
1622        start = page_address(page);
1623
1624        if (unlikely(s->flags & SLAB_POISON))
1625                memset(start, POISON_INUSE, PAGE_SIZE << order);
1626
1627        kasan_poison_slab(page);
1628
1629        shuffle = shuffle_freelist(s, page);
1630
1631        if (!shuffle) {
1632                for_each_object_idx(p, idx, s, start, page->objects) {
1633                        setup_object(s, page, p);
1634                        if (likely(idx < page->objects))
1635                                set_freepointer(s, p, p + s->size);
1636                        else
1637                                set_freepointer(s, p, NULL);
1638                }
1639                page->freelist = fixup_red_left(s, start);
1640        }
1641
1642        page->inuse = page->objects;
1643        page->frozen = 1;
1644
1645out:
1646        if (gfpflags_allow_blocking(flags))
1647                local_irq_disable();
1648        if (!page)
1649                return NULL;
1650
1651        mod_lruvec_page_state(page,
1652                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1653                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1654                1 << oo_order(oo));
1655
1656        inc_slabs_node(s, page_to_nid(page), page->objects);
1657
1658        return page;
1659}
1660
1661static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1662{
1663        if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
1664                gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
1665                flags &= ~GFP_SLAB_BUG_MASK;
1666                pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
1667                                invalid_mask, &invalid_mask, flags, &flags);
1668                dump_stack();
1669        }
1670
1671        return allocate_slab(s,
1672                flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1673}
1674
1675static void __free_slab(struct kmem_cache *s, struct page *page)
1676{
1677        int order = compound_order(page);
1678        int pages = 1 << order;
1679
1680        if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1681                void *p;
1682
1683                slab_pad_check(s, page);
1684                for_each_object(p, s, page_address(page),
1685                                                page->objects)
1686                        check_object(s, page, p, SLUB_RED_INACTIVE);
1687        }
1688
1689        mod_lruvec_page_state(page,
1690                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1691                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1692                -pages);
1693
1694        __ClearPageSlabPfmemalloc(page);
1695        __ClearPageSlab(page);
1696
1697        page_mapcount_reset(page);
1698        if (current->reclaim_state)
1699                current->reclaim_state->reclaimed_slab += pages;
1700        memcg_uncharge_slab(page, order, s);
1701        __free_pages(page, order);
1702}
1703
1704#define need_reserve_slab_rcu                                           \
1705        (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1706
1707static void rcu_free_slab(struct rcu_head *h)
1708{
1709        struct page *page;
1710
1711        if (need_reserve_slab_rcu)
1712                page = virt_to_head_page(h);
1713        else
1714                page = container_of((struct list_head *)h, struct page, lru);
1715
1716        __free_slab(page->slab_cache, page);
1717}
1718
1719static void free_slab(struct kmem_cache *s, struct page *page)
1720{
1721        if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
1722                struct rcu_head *head;
1723
1724                if (need_reserve_slab_rcu) {
1725                        int order = compound_order(page);
1726                        int offset = (PAGE_SIZE << order) - s->reserved;
1727
1728                        VM_BUG_ON(s->reserved != sizeof(*head));
1729                        head = page_address(page) + offset;
1730                } else {
1731                        head = &page->rcu_head;
1732                }
1733
1734                call_rcu(head, rcu_free_slab);
1735        } else
1736                __free_slab(s, page);
1737}
1738
1739static void discard_slab(struct kmem_cache *s, struct page *page)
1740{
1741        dec_slabs_node(s, page_to_nid(page), page->objects);
1742        free_slab(s, page);
1743}
1744
1745/*
1746 * Management of partially allocated slabs.
1747 */
1748static inline void
1749__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1750{
1751        n->nr_partial++;
1752        if (tail == DEACTIVATE_TO_TAIL)
1753                list_add_tail(&page->lru, &n->partial);
1754        else
1755                list_add(&page->lru, &n->partial);
1756}
1757
1758static inline void add_partial(struct kmem_cache_node *n,
1759                                struct page *page, int tail)
1760{
1761        lockdep_assert_held(&n->list_lock);
1762        __add_partial(n, page, tail);
1763}
1764
1765static inline void remove_partial(struct kmem_cache_node *n,
1766                                        struct page *page)
1767{
1768        lockdep_assert_held(&n->list_lock);
1769        list_del(&page->lru);
1770        n->nr_partial--;
1771}
1772
1773/*
1774 * Remove slab from the partial list, freeze it and
1775 * return the pointer to the freelist.
1776 *
1777 * Returns a list of objects or NULL if it fails.
1778 */
1779static inline void *acquire_slab(struct kmem_cache *s,
1780                struct kmem_cache_node *n, struct page *page,
1781                int mode, int *objects)
1782{
1783        void *freelist;
1784        unsigned long counters;
1785        struct page new;
1786
1787        lockdep_assert_held(&n->list_lock);
1788
1789        /*
1790         * Zap the freelist and set the frozen bit.
1791         * The old freelist is the list of objects for the
1792         * per cpu allocation list.
1793         */
1794        freelist = page->freelist;
1795        counters = page->counters;
1796        new.counters = counters;
1797        *objects = new.objects - new.inuse;
1798        if (mode) {
1799                new.inuse = page->objects;
1800                new.freelist = NULL;
1801        } else {
1802                new.freelist = freelist;
1803        }
1804
1805        VM_BUG_ON(new.frozen);
1806        new.frozen = 1;
1807
1808        if (!__cmpxchg_double_slab(s, page,
1809                        freelist, counters,
1810                        new.freelist, new.counters,
1811                        "acquire_slab"))
1812                return NULL;
1813
1814        remove_partial(n, page);
1815        WARN_ON(!freelist);
1816        return freelist;
1817}
1818
1819static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1820static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1821
1822/*
1823 * Try to allocate a partial slab from a specific node.
1824 */
1825static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1826                                struct kmem_cache_cpu *c, gfp_t flags)
1827{
1828        struct page *page, *page2;
1829        void *object = NULL;
1830        unsigned int available = 0;
1831        int objects;
1832
1833        /*
1834         * Racy check. If we mistakenly see no partial slabs then we
1835         * just allocate an empty slab. If we mistakenly try to get a
1836         * partial slab and there is none available then get_partials()
1837         * will return NULL.
1838         */
1839        if (!n || !n->nr_partial)
1840                return NULL;
1841
1842        spin_lock(&n->list_lock);
1843        list_for_each_entry_safe(page, page2, &n->partial, lru) {
1844                void *t;
1845
1846                if (!pfmemalloc_match(page, flags))
1847                        continue;
1848
1849                t = acquire_slab(s, n, page, object == NULL, &objects);
1850                if (!t)
1851                        break;
1852
1853                available += objects;
1854                if (!object) {
1855                        c->page = page;
1856                        stat(s, ALLOC_FROM_PARTIAL);
1857                        object = t;
1858                } else {
1859                        put_cpu_partial(s, page, 0);
1860                        stat(s, CPU_PARTIAL_NODE);
1861                }
1862                if (!kmem_cache_has_cpu_partial(s)
1863                        || available > slub_cpu_partial(s) / 2)
1864                        break;
1865
1866        }
1867        spin_unlock(&n->list_lock);
1868        return object;
1869}
1870
1871/*
1872 * Get a page from somewhere. Search in increasing NUMA distances.
1873 */
1874static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1875                struct kmem_cache_cpu *c)
1876{
1877#ifdef CONFIG_NUMA
1878        struct zonelist *zonelist;
1879        struct zoneref *z;
1880        struct zone *zone;
1881        enum zone_type high_zoneidx = gfp_zone(flags);
1882        void *object;
1883        unsigned int cpuset_mems_cookie;
1884
1885        /*
1886         * The defrag ratio allows a configuration of the tradeoffs between
1887         * inter node defragmentation and node local allocations. A lower
1888         * defrag_ratio increases the tendency to do local allocations
1889         * instead of attempting to obtain partial slabs from other nodes.
1890         *
1891         * If the defrag_ratio is set to 0 then kmalloc() always
1892         * returns node local objects. If the ratio is higher then kmalloc()
1893         * may return off node objects because partial slabs are obtained
1894         * from other nodes and filled up.
1895         *
1896         * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
1897         * (which makes defrag_ratio = 1000) then every (well almost)
1898         * allocation will first attempt to defrag slab caches on other nodes.
1899         * This means scanning over all nodes to look for partial slabs which
1900         * may be expensive if we do it every time we are trying to find a slab
1901         * with available objects.
1902         */
1903        if (!s->remote_node_defrag_ratio ||
1904                        get_cycles() % 1024 > s->remote_node_defrag_ratio)
1905                return NULL;
1906
1907        do {
1908                cpuset_mems_cookie = read_mems_allowed_begin();
1909                zonelist = node_zonelist(mempolicy_slab_node(), flags);
1910                for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1911                        struct kmem_cache_node *n;
1912
1913                        n = get_node(s, zone_to_nid(zone));
1914
1915                        if (n && cpuset_zone_allowed(zone, flags) &&
1916                                        n->nr_partial > s->min_partial) {
1917                                object = get_partial_node(s, n, c, flags);
1918                                if (object) {
1919                                        /*
1920                                         * Don't check read_mems_allowed_retry()
1921                                         * here - if mems_allowed was updated in
1922                                         * parallel, that was a harmless race
1923                                         * between allocation and the cpuset
1924                                         * update
1925                                         */
1926                                        return object;
1927                                }
1928                        }
1929                }
1930        } while (read_mems_allowed_retry(cpuset_mems_cookie));
1931#endif
1932        return NULL;
1933}
1934
1935/*
1936 * Get a partial page, lock it and return it.
1937 */
1938static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1939                struct kmem_cache_cpu *c)
1940{
1941        void *object;
1942        int searchnode = node;
1943
1944        if (node == NUMA_NO_NODE)
1945                searchnode = numa_mem_id();
1946        else if (!node_present_pages(node))
1947                searchnode = node_to_mem_node(node);
1948
1949        object = get_partial_node(s, get_node(s, searchnode), c, flags);
1950        if (object || node != NUMA_NO_NODE)
1951                return object;
1952
1953        return get_any_partial(s, flags, c);
1954}
1955
1956#ifdef CONFIG_PREEMPT
1957/*
1958 * Calculate the next globally unique transaction for disambiguiation
1959 * during cmpxchg. The transactions start with the cpu number and are then
1960 * incremented by CONFIG_NR_CPUS.
1961 */
1962#define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
1963#else
1964/*
1965 * No preemption supported therefore also no need to check for
1966 * different cpus.
1967 */
1968#define TID_STEP 1
1969#endif
1970
1971static inline unsigned long next_tid(unsigned long tid)
1972{
1973        return tid + TID_STEP;
1974}
1975
1976static inline unsigned int tid_to_cpu(unsigned long tid)
1977{
1978        return tid % TID_STEP;
1979}
1980
1981static inline unsigned long tid_to_event(unsigned long tid)
1982{
1983        return tid / TID_STEP;
1984}
1985
1986static inline unsigned int init_tid(int cpu)
1987{
1988        return cpu;
1989}
1990
1991static inline void note_cmpxchg_failure(const char *n,
1992                const struct kmem_cache *s, unsigned long tid)
1993{
1994#ifdef SLUB_DEBUG_CMPXCHG
1995        unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1996
1997        pr_info("%s %s: cmpxchg redo ", n, s->name);
1998
1999#ifdef CONFIG_PREEMPT
2000        if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
2001                pr_warn("due to cpu change %d -> %d\n",
2002                        tid_to_cpu(tid), tid_to_cpu(actual_tid));
2003        else
2004#endif
2005        if (tid_to_event(tid) != tid_to_event(actual_tid))
2006                pr_warn("due to cpu running other code. Event %ld->%ld\n",
2007                        tid_to_event(tid), tid_to_event(actual_tid));
2008        else
2009                pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2010                        actual_tid, tid, next_tid(tid));
2011#endif
2012        stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2013}
2014
2015static void init_kmem_cache_cpus(struct kmem_cache *s)
2016{
2017        int cpu;
2018
2019        for_each_possible_cpu(cpu)
2020                per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
2021}
2022
2023/*
2024 * Remove the cpu slab
2025 */
2026static void deactivate_slab(struct kmem_cache *s, struct page *page,
2027                                void *freelist, struct kmem_cache_cpu *c)
2028{
2029        enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2030        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2031        int lock = 0;
2032        enum slab_modes l = M_NONE, m = M_NONE;
2033        void *nextfree;
2034        int tail = DEACTIVATE_TO_HEAD;
2035        struct page new;
2036        struct page old;
2037
2038        if (page->freelist) {
2039                stat(s, DEACTIVATE_REMOTE_FREES);
2040                tail = DEACTIVATE_TO_TAIL;
2041        }
2042
2043        /*
2044         * Stage one: Free all available per cpu objects back
2045         * to the page freelist while it is still frozen. Leave the
2046         * last one.
2047         *
2048         * There is no need to take the list->lock because the page
2049         * is still frozen.
2050         */
2051        while (freelist && (nextfree = get_freepointer(s, freelist))) {
2052                void *prior;
2053                unsigned long counters;
2054
2055                do {
2056                        prior = page->freelist;
2057                        counters = page->counters;
2058                        set_freepointer(s, freelist, prior);
2059                        new.counters = counters;
2060                        new.inuse--;
2061                        VM_BUG_ON(!new.frozen);
2062
2063                } while (!__cmpxchg_double_slab(s, page,
2064                        prior, counters,
2065                        freelist, new.counters,
2066                        "drain percpu freelist"));
2067
2068                freelist = nextfree;
2069        }
2070
2071        /*
2072         * Stage two: Ensure that the page is unfrozen while the
2073         * list presence reflects the actual number of objects
2074         * during unfreeze.
2075         *
2076         * We setup the list membership and then perform a cmpxchg
2077         * with the count. If there is a mismatch then the page
2078         * is not unfrozen but the page is on the wrong list.
2079         *
2080         * Then we restart the process which may have to remove
2081         * the page from the list that we just put it on again
2082         * because the number of objects in the slab may have
2083         * changed.
2084         */
2085redo:
2086
2087        old.freelist = page->freelist;
2088        old.counters = page->counters;
2089        VM_BUG_ON(!old.frozen);
2090
2091        /* Determine target state of the slab */
2092        new.counters = old.counters;
2093        if (freelist) {
2094                new.inuse--;
2095                set_freepointer(s, freelist, old.freelist);
2096                new.freelist = freelist;
2097        } else
2098                new.freelist = old.freelist;
2099
2100        new.frozen = 0;
2101
2102        if (!new.inuse && n->nr_partial >= s->min_partial)
2103                m = M_FREE;
2104        else if (new.freelist) {
2105                m = M_PARTIAL;
2106                if (!lock) {
2107                        lock = 1;
2108                        /*
2109                         * Taking the spinlock removes the possiblity
2110                         * that acquire_slab() will see a slab page that
2111                         * is frozen
2112                         */
2113                        spin_lock(&n->list_lock);
2114                }
2115        } else {
2116                m = M_FULL;
2117                if (kmem_cache_debug(s) && !lock) {
2118                        lock = 1;
2119                        /*
2120                         * This also ensures that the scanning of full
2121                         * slabs from diagnostic functions will not see
2122                         * any frozen slabs.
2123                         */
2124                        spin_lock(&n->list_lock);
2125                }
2126        }
2127
2128        if (l != m) {
2129
2130                if (l == M_PARTIAL)
2131
2132                        remove_partial(n, page);
2133
2134                else if (l == M_FULL)
2135
2136                        remove_full(s, n, page);
2137
2138                if (m == M_PARTIAL) {
2139
2140                        add_partial(n, page, tail);
2141                        stat(s, tail);
2142
2143                } else if (m == M_FULL) {
2144
2145                        stat(s, DEACTIVATE_FULL);
2146                        add_full(s, n, page);
2147
2148                }
2149        }
2150
2151        l = m;
2152        if (!__cmpxchg_double_slab(s, page,
2153                                old.freelist, old.counters,
2154                                new.freelist, new.counters,
2155                                "unfreezing slab"))
2156                goto redo;
2157
2158        if (lock)
2159                spin_unlock(&n->list_lock);
2160
2161        if (m == M_FREE) {
2162                stat(s, DEACTIVATE_EMPTY);
2163                discard_slab(s, page);
2164                stat(s, FREE_SLAB);
2165        }
2166
2167        c->page = NULL;
2168        c->freelist = NULL;
2169}
2170
2171/*
2172 * Unfreeze all the cpu partial slabs.
2173 *
2174 * This function must be called with interrupts disabled
2175 * for the cpu using c (or some other guarantee must be there
2176 * to guarantee no concurrent accesses).
2177 */
2178static void unfreeze_partials(struct kmem_cache *s,
2179                struct kmem_cache_cpu *c)
2180{
2181#ifdef CONFIG_SLUB_CPU_PARTIAL
2182        struct kmem_cache_node *n = NULL, *n2 = NULL;
2183        struct page *page, *discard_page = NULL;
2184
2185        while ((page = c->partial)) {
2186                struct page new;
2187                struct page old;
2188
2189                c->partial = page->next;
2190
2191                n2 = get_node(s, page_to_nid(page));
2192                if (n != n2) {
2193                        if (n)
2194                                spin_unlock(&n->list_lock);
2195
2196                        n = n2;
2197                        spin_lock(&n->list_lock);
2198                }
2199
2200                do {
2201
2202                        old.freelist = page->freelist;
2203                        old.counters = page->counters;
2204                        VM_BUG_ON(!old.frozen);
2205
2206                        new.counters = old.counters;
2207                        new.freelist = old.freelist;
2208
2209                        new.frozen = 0;
2210
2211                } while (!__cmpxchg_double_slab(s, page,
2212                                old.freelist, old.counters,
2213                                new.freelist, new.counters,
2214                                "unfreezing slab"));
2215
2216                if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
2217                        page->next = discard_page;
2218                        discard_page = page;
2219                } else {
2220                        add_partial(n, page, DEACTIVATE_TO_TAIL);
2221                        stat(s, FREE_ADD_PARTIAL);
2222                }
2223        }
2224
2225        if (n)
2226                spin_unlock(&n->list_lock);
2227
2228        while (discard_page) {
2229                page = discard_page;
2230                discard_page = discard_page->next;
2231
2232                stat(s, DEACTIVATE_EMPTY);
2233                discard_slab(s, page);
2234                stat(s, FREE_SLAB);
2235        }
2236#endif
2237}
2238
2239/*
2240 * Put a page that was just frozen (in __slab_free) into a partial page
2241 * slot if available.
2242 *
2243 * If we did not find a slot then simply move all the partials to the
2244 * per node partial list.
2245 */
2246static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2247{
2248#ifdef CONFIG_SLUB_CPU_PARTIAL
2249        struct page *oldpage;
2250        int pages;
2251        int pobjects;
2252
2253        preempt_disable();
2254        do {
2255                pages = 0;
2256                pobjects = 0;
2257                oldpage = this_cpu_read(s->cpu_slab->partial);
2258
2259                if (oldpage) {
2260                        pobjects = oldpage->pobjects;
2261                        pages = oldpage->pages;
2262                        if (drain && pobjects > s->cpu_partial) {
2263                                unsigned long flags;
2264                                /*
2265                                 * partial array is full. Move the existing
2266                                 * set to the per node partial list.
2267                                 */
2268                                local_irq_save(flags);
2269                                unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2270                                local_irq_restore(flags);
2271                                oldpage = NULL;
2272                                pobjects = 0;
2273                                pages = 0;
2274                                stat(s, CPU_PARTIAL_DRAIN);
2275                        }
2276                }
2277
2278                pages++;
2279                pobjects += page->objects - page->inuse;
2280
2281                page->pages = pages;
2282                page->pobjects = pobjects;
2283                page->next = oldpage;
2284
2285        } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2286                                                                != oldpage);
2287        if (unlikely(!s->cpu_partial)) {
2288                unsigned long flags;
2289
2290                local_irq_save(flags);
2291                unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2292                local_irq_restore(flags);
2293        }
2294        preempt_enable();
2295#endif
2296}
2297
2298static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2299{
2300        stat(s, CPUSLAB_FLUSH);
2301        deactivate_slab(s, c->page, c->freelist, c);
2302
2303        c->tid = next_tid(c->tid);
2304}
2305
2306/*
2307 * Flush cpu slab.
2308 *
2309 * Called from IPI handler with interrupts disabled.
2310 */
2311static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2312{
2313        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2314
2315        if (likely(c)) {
2316                if (c->page)
2317                        flush_slab(s, c);
2318
2319                unfreeze_partials(s, c);
2320        }
2321}
2322
2323static void flush_cpu_slab(void *d)
2324{
2325        struct kmem_cache *s = d;
2326
2327        __flush_cpu_slab(s, smp_processor_id());
2328}
2329
2330static bool has_cpu_slab(int cpu, void *info)
2331{
2332        struct kmem_cache *s = info;
2333        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2334
2335        return c->page || slub_percpu_partial(c);
2336}
2337
2338static void flush_all(struct kmem_cache *s)
2339{
2340        on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
2341}
2342
2343/*
2344 * Use the cpu notifier to insure that the cpu slabs are flushed when
2345 * necessary.
2346 */
2347static int slub_cpu_dead(unsigned int cpu)
2348{
2349        struct kmem_cache *s;
2350        unsigned long flags;
2351
2352        mutex_lock(&slab_mutex);
2353        list_for_each_entry(s, &slab_caches, list) {
2354                local_irq_save(flags);
2355                __flush_cpu_slab(s, cpu);
2356                local_irq_restore(flags);
2357        }
2358        mutex_unlock(&slab_mutex);
2359        return 0;
2360}
2361
2362/*
2363 * Check if the objects in a per cpu structure fit numa
2364 * locality expectations.
2365 */
2366static inline int node_match(struct page *page, int node)
2367{
2368#ifdef CONFIG_NUMA
2369        if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
2370                return 0;
2371#endif
2372        return 1;
2373}
2374
2375#ifdef CONFIG_SLUB_DEBUG
2376static int count_free(struct page *page)
2377{
2378        return page->objects - page->inuse;
2379}
2380
2381static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2382{
2383        return atomic_long_read(&n->total_objects);
2384}
2385#endif /* CONFIG_SLUB_DEBUG */
2386
2387#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
2388static unsigned long count_partial(struct kmem_cache_node *n,
2389                                        int (*get_count)(struct page *))
2390{
2391        unsigned long flags;
2392        unsigned long x = 0;
2393        struct page *page;
2394
2395        spin_lock_irqsave(&n->list_lock, flags);
2396        list_for_each_entry(page, &n->partial, lru)
2397                x += get_count(page);
2398        spin_unlock_irqrestore(&n->list_lock, flags);
2399        return x;
2400}
2401#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
2402
2403static noinline void
2404slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2405{
2406#ifdef CONFIG_SLUB_DEBUG
2407        static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2408                                      DEFAULT_RATELIMIT_BURST);
2409        int node;
2410        struct kmem_cache_node *n;
2411
2412        if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2413                return;
2414
2415        pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2416                nid, gfpflags, &gfpflags);
2417        pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
2418                s->name, s->object_size, s->size, oo_order(s->oo),
2419                oo_order(s->min));
2420
2421        if (oo_order(s->min) > get_order(s->object_size))
2422                pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
2423                        s->name);
2424
2425        for_each_kmem_cache_node(s, node, n) {
2426                unsigned long nr_slabs;
2427                unsigned long nr_objs;
2428                unsigned long nr_free;
2429
2430                nr_free  = count_partial(n, count_free);
2431                nr_slabs = node_nr_slabs(n);
2432                nr_objs  = node_nr_objs(n);
2433
2434                pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
2435                        node, nr_slabs, nr_objs, nr_free);
2436        }
2437#endif
2438}
2439
2440static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2441                        int node, struct kmem_cache_cpu **pc)
2442{
2443        void *freelist;
2444        struct kmem_cache_cpu *c = *pc;
2445        struct page *page;
2446
2447        freelist = get_partial(s, flags, node, c);
2448
2449        if (freelist)
2450                return freelist;
2451
2452        page = new_slab(s, flags, node);
2453        if (page) {
2454                c = raw_cpu_ptr(s->cpu_slab);
2455                if (c->page)
2456                        flush_slab(s, c);
2457
2458                /*
2459                 * No other reference to the page yet so we can
2460                 * muck around with it freely without cmpxchg
2461                 */
2462                freelist = page->freelist;
2463                page->freelist = NULL;
2464
2465                stat(s, ALLOC_SLAB);
2466                c->page = page;
2467                *pc = c;
2468        } else
2469                freelist = NULL;
2470
2471        return freelist;
2472}
2473
2474static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2475{
2476        if (unlikely(PageSlabPfmemalloc(page)))
2477                return gfp_pfmemalloc_allowed(gfpflags);
2478
2479        return true;
2480}
2481
2482/*
2483 * Check the page->freelist of a page and either transfer the freelist to the
2484 * per cpu freelist or deactivate the page.
2485 *
2486 * The page is still frozen if the return value is not NULL.
2487 *
2488 * If this function returns NULL then the page has been unfrozen.
2489 *
2490 * This function must be called with interrupt disabled.
2491 */
2492static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2493{
2494        struct page new;
2495        unsigned long counters;
2496        void *freelist;
2497
2498        do {
2499                freelist = page->freelist;
2500                counters = page->counters;
2501
2502                new.counters = counters;
2503                VM_BUG_ON(!new.frozen);
2504
2505                new.inuse = page->objects;
2506                new.frozen = freelist != NULL;
2507
2508        } while (!__cmpxchg_double_slab(s, page,
2509                freelist, counters,
2510                NULL, new.counters,
2511                "get_freelist"));
2512
2513        return freelist;
2514}
2515
2516/*
2517 * Slow path. The lockless freelist is empty or we need to perform
2518 * debugging duties.
2519 *
2520 * Processing is still very fast if new objects have been freed to the
2521 * regular freelist. In that case we simply take over the regular freelist
2522 * as the lockless freelist and zap the regular freelist.
2523 *
2524 * If that is not working then we fall back to the partial lists. We take the
2525 * first element of the freelist as the object to allocate now and move the
2526 * rest of the freelist to the lockless freelist.
2527 *
2528 * And if we were unable to get a new slab from the partial slab lists then
2529 * we need to allocate a new slab. This is the slowest path since it involves
2530 * a call to the page allocator and the setup of a new slab.
2531 *
2532 * Version of __slab_alloc to use when we know that interrupts are
2533 * already disabled (which is the case for bulk allocation).
2534 */
2535static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2536                          unsigned long addr, struct kmem_cache_cpu *c)
2537{
2538        void *freelist;
2539        struct page *page;
2540
2541        page = c->page;
2542        if (!page)
2543                goto new_slab;
2544redo:
2545
2546        if (unlikely(!node_match(page, node))) {
2547                int searchnode = node;
2548
2549                if (node != NUMA_NO_NODE && !node_present_pages(node))
2550                        searchnode = node_to_mem_node(node);
2551
2552                if (unlikely(!node_match(page, searchnode))) {
2553                        stat(s, ALLOC_NODE_MISMATCH);
2554                        deactivate_slab(s, page, c->freelist, c);
2555                        goto new_slab;
2556                }
2557        }
2558
2559        /*
2560         * By rights, we should be searching for a slab page that was
2561         * PFMEMALLOC but right now, we are losing the pfmemalloc
2562         * information when the page leaves the per-cpu allocator
2563         */
2564        if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2565                deactivate_slab(s, page, c->freelist, c);
2566                goto new_slab;
2567        }
2568
2569        /* must check again c->freelist in case of cpu migration or IRQ */
2570        freelist = c->freelist;
2571        if (freelist)
2572                goto load_freelist;
2573
2574        freelist = get_freelist(s, page);
2575
2576        if (!freelist) {
2577                c->page = NULL;
2578                stat(s, DEACTIVATE_BYPASS);
2579                goto new_slab;
2580        }
2581
2582        stat(s, ALLOC_REFILL);
2583
2584load_freelist:
2585        /*
2586         * freelist is pointing to the list of objects to be used.
2587         * page is pointing to the page from which the objects are obtained.
2588         * That page must be frozen for per cpu allocations to work.
2589         */
2590        VM_BUG_ON(!c->page->frozen);
2591        c->freelist = get_freepointer(s, freelist);
2592        c->tid = next_tid(c->tid);
2593        return freelist;
2594
2595new_slab:
2596
2597        if (slub_percpu_partial(c)) {
2598                page = c->page = slub_percpu_partial(c);
2599                slub_set_percpu_partial(c, page);
2600                stat(s, CPU_PARTIAL_ALLOC);
2601                goto redo;
2602        }
2603
2604        freelist = new_slab_objects(s, gfpflags, node, &c);
2605
2606        if (unlikely(!freelist)) {
2607                slab_out_of_memory(s, gfpflags, node);
2608                return NULL;
2609        }
2610
2611        page = c->page;
2612        if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2613                goto load_freelist;
2614
2615        /* Only entered in the debug case */
2616        if (kmem_cache_debug(s) &&
2617                        !alloc_debug_processing(s, page, freelist, addr))
2618                goto new_slab;  /* Slab failed checks. Next slab needed */
2619
2620        deactivate_slab(s, page, get_freepointer(s, freelist), c);
2621        return freelist;
2622}
2623
2624/*
2625 * Another one that disabled interrupt and compensates for possible
2626 * cpu changes by refetching the per cpu area pointer.
2627 */
2628static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2629                          unsigned long addr, struct kmem_cache_cpu *c)
2630{
2631        void *p;
2632        unsigned long flags;
2633
2634        local_irq_save(flags);
2635#ifdef CONFIG_PREEMPT
2636        /*
2637         * We may have been preempted and rescheduled on a different
2638         * cpu before disabling interrupts. Need to reload cpu area
2639         * pointer.
2640         */
2641        c = this_cpu_ptr(s->cpu_slab);
2642#endif
2643
2644        p = ___slab_alloc(s, gfpflags, node, addr, c);
2645        local_irq_restore(flags);
2646        return p;
2647}
2648
2649/*
2650 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2651 * have the fastpath folded into their functions. So no function call
2652 * overhead for requests that can be satisfied on the fastpath.
2653 *
2654 * The fastpath works by first checking if the lockless freelist can be used.
2655 * If not then __slab_alloc is called for slow processing.
2656 *
2657 * Otherwise we can simply pick the next object from the lockless free list.
2658 */
2659static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2660                gfp_t gfpflags, int node, unsigned long addr)
2661{
2662        void *object;
2663        struct kmem_cache_cpu *c;
2664        struct page *page;
2665        unsigned long tid;
2666
2667        s = slab_pre_alloc_hook(s, gfpflags);
2668        if (!s)
2669                return NULL;
2670redo:
2671        /*
2672         * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2673         * enabled. We may switch back and forth between cpus while
2674         * reading from one cpu area. That does not matter as long
2675         * as we end up on the original cpu again when doing the cmpxchg.
2676         *
2677         * We should guarantee that tid and kmem_cache are retrieved on
2678         * the same cpu. It could be different if CONFIG_PREEMPT so we need
2679         * to check if it is matched or not.
2680         */
2681        do {
2682                tid = this_cpu_read(s->cpu_slab->tid);
2683                c = raw_cpu_ptr(s->cpu_slab);
2684        } while (IS_ENABLED(CONFIG_PREEMPT) &&
2685                 unlikely(tid != READ_ONCE(c->tid)));
2686
2687        /*
2688         * Irqless object alloc/free algorithm used here depends on sequence
2689         * of fetching cpu_slab's data. tid should be fetched before anything
2690         * on c to guarantee that object and page associated with previous tid
2691         * won't be used with current tid. If we fetch tid first, object and
2692         * page could be one associated with next tid and our alloc/free
2693         * request will be failed. In this case, we will retry. So, no problem.
2694         */
2695        barrier();
2696
2697        /*
2698         * The transaction ids are globally unique per cpu and per operation on
2699         * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
2700         * occurs on the right processor and that there was no operation on the
2701         * linked list in between.
2702         */
2703
2704        object = c->freelist;
2705        page = c->page;
2706        if (unlikely(!object || !node_match(page, node))) {
2707                object = __slab_alloc(s, gfpflags, node, addr, c);
2708                stat(s, ALLOC_SLOWPATH);
2709        } else {
2710                void *next_object = get_freepointer_safe(s, object);
2711
2712                /*
2713                 * The cmpxchg will only match if there was no additional
2714                 * operation and if we are on the right processor.
2715                 *
2716                 * The cmpxchg does the following atomically (without lock
2717                 * semantics!)
2718                 * 1. Relocate first pointer to the current per cpu area.
2719                 * 2. Verify that tid and freelist have not been changed
2720                 * 3. If they were not changed replace tid and freelist
2721                 *
2722                 * Since this is without lock semantics the protection is only
2723                 * against code executing on this cpu *not* from access by
2724                 * other cpus.
2725                 */
2726                if (unlikely(!this_cpu_cmpxchg_double(
2727                                s->cpu_slab->freelist, s->cpu_slab->tid,
2728                                object, tid,
2729                                next_object, next_tid(tid)))) {
2730
2731                        note_cmpxchg_failure("slab_alloc", s, tid);
2732                        goto redo;
2733                }
2734                prefetch_freepointer(s, next_object);
2735                stat(s, ALLOC_FASTPATH);
2736        }
2737
2738        if (unlikely(gfpflags & __GFP_ZERO) && object)
2739                memset(object, 0, s->object_size);
2740
2741        slab_post_alloc_hook(s, gfpflags, 1, &object);
2742
2743        return object;
2744}
2745
2746static __always_inline void *slab_alloc(struct kmem_cache *s,
2747                gfp_t gfpflags, unsigned long addr)
2748{
2749        return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2750}
2751
2752void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2753{
2754        void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2755
2756        trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2757                                s->size, gfpflags);
2758
2759        return ret;
2760}
2761EXPORT_SYMBOL(kmem_cache_alloc);
2762
2763#ifdef CONFIG_TRACING
2764void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2765{
2766        void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2767        trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2768        kasan_kmalloc(s, ret, size, gfpflags);
2769        return ret;
2770}
2771EXPORT_SYMBOL(kmem_cache_alloc_trace);
2772#endif
2773
2774#ifdef CONFIG_NUMA
2775void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2776{
2777        void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2778
2779        trace_kmem_cache_alloc_node(_RET_IP_, ret,
2780                                    s->object_size, s->size, gfpflags, node);
2781
2782        return ret;
2783}
2784EXPORT_SYMBOL(kmem_cache_alloc_node);
2785
2786#ifdef CONFIG_TRACING
2787void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2788                                    gfp_t gfpflags,
2789                                    int node, size_t size)
2790{
2791        void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2792
2793        trace_kmalloc_node(_RET_IP_, ret,
2794                           size, s->size, gfpflags, node);
2795
2796        kasan_kmalloc(s, ret, size, gfpflags);
2797        return ret;
2798}
2799EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2800#endif
2801#endif
2802
2803/*
2804 * Slow path handling. This may still be called frequently since objects
2805 * have a longer lifetime than the cpu slabs in most processing loads.
2806 *
2807 * So we still attempt to reduce cache line usage. Just take the slab
2808 * lock and free the item. If there is no additional partial page
2809 * handling required then we can return immediately.
2810 */
2811static void __slab_free(struct kmem_cache *s, struct page *page,
2812                        void *head, void *tail, int cnt,
2813                        unsigned long addr)
2814
2815{
2816        void *prior;
2817        int was_frozen;
2818        struct page new;
2819        unsigned long counters;
2820        struct kmem_cache_node *n = NULL;
2821        unsigned long uninitialized_var(flags);
2822
2823        stat(s, FREE_SLOWPATH);
2824
2825        if (kmem_cache_debug(s) &&
2826            !free_debug_processing(s, page, head, tail, cnt, addr))
2827                return;
2828
2829        do {
2830                if (unlikely(n)) {
2831                        spin_unlock_irqrestore(&n->list_lock, flags);
2832                        n = NULL;
2833                }
2834                prior = page->freelist;
2835                counters = page->counters;
2836                set_freepointer(s, tail, prior);
2837                new.counters = counters;
2838                was_frozen = new.frozen;
2839                new.inuse -= cnt;
2840                if ((!new.inuse || !prior) && !was_frozen) {
2841
2842                        if (kmem_cache_has_cpu_partial(s) && !prior) {
2843
2844                                /*
2845                                 * Slab was on no list before and will be
2846                                 * partially empty
2847                                 * We can defer the list move and instead
2848                                 * freeze it.
2849                                 */
2850                                new.frozen = 1;
2851
2852                        } else { /* Needs to be taken off a list */
2853
2854                                n = get_node(s, page_to_nid(page));
2855                                /*
2856                                 * Speculatively acquire the list_lock.
2857                                 * If the cmpxchg does not succeed then we may
2858                                 * drop the list_lock without any processing.
2859                                 *
2860                                 * Otherwise the list_lock will synchronize with
2861                                 * other processors updating the list of slabs.
2862                                 */
2863                                spin_lock_irqsave(&n->list_lock, flags);
2864
2865                        }
2866                }
2867
2868        } while (!cmpxchg_double_slab(s, page,
2869                prior, counters,
2870                head, new.counters,
2871                "__slab_free"));
2872
2873        if (likely(!n)) {
2874
2875                /*
2876                 * If we just froze the page then put it onto the
2877                 * per cpu partial list.
2878                 */
2879                if (new.frozen && !was_frozen) {
2880                        put_cpu_partial(s, page, 1);
2881                        stat(s, CPU_PARTIAL_FREE);
2882                }
2883                /*
2884                 * The list lock was not taken therefore no list
2885                 * activity can be necessary.
2886                 */
2887                if (was_frozen)
2888                        stat(s, FREE_FROZEN);
2889                return;
2890        }
2891
2892        if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
2893                goto slab_empty;
2894
2895        /*
2896         * Objects left in the slab. If it was not on the partial list before
2897         * then add it.
2898         */
2899        if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2900                if (kmem_cache_debug(s))
2901                        remove_full(s, n, page);
2902                add_partial(n, page, DEACTIVATE_TO_TAIL);
2903                stat(s, FREE_ADD_PARTIAL);
2904        }
2905        spin_unlock_irqrestore(&n->list_lock, flags);
2906        return;
2907
2908slab_empty:
2909        if (prior) {
2910                /*
2911                 * Slab on the partial list.
2912                 */
2913                remove_partial(n, page);
2914                stat(s, FREE_REMOVE_PARTIAL);
2915        } else {
2916                /* Slab must be on the full list */
2917                remove_full(s, n, page);
2918        }
2919
2920        spin_unlock_irqrestore(&n->list_lock, flags);
2921        stat(s, FREE_SLAB);
2922        discard_slab(s, page);
2923}
2924
2925/*
2926 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2927 * can perform fastpath freeing without additional function calls.
2928 *
2929 * The fastpath is only possible if we are freeing to the current cpu slab
2930 * of this processor. This typically the case if we have just allocated
2931 * the item before.
2932 *
2933 * If fastpath is not possible then fall back to __slab_free where we deal
2934 * with all sorts of special processing.
2935 *
2936 * Bulk free of a freelist with several objects (all pointing to the
2937 * same page) possible by specifying head and tail ptr, plus objects
2938 * count (cnt). Bulk free indicated by tail pointer being set.
2939 */
2940static __always_inline void do_slab_free(struct kmem_cache *s,
2941                                struct page *page, void *head, void *tail,
2942                                int cnt, unsigned long addr)
2943{
2944        void *tail_obj = tail ? : head;
2945        struct kmem_cache_cpu *c;
2946        unsigned long tid;
2947redo:
2948        /*
2949         * Determine the currently cpus per cpu slab.
2950         * The cpu may change afterward. However that does not matter since
2951         * data is retrieved via this pointer. If we are on the same cpu
2952         * during the cmpxchg then the free will succeed.
2953         */
2954        do {
2955                tid = this_cpu_read(s->cpu_slab->tid);
2956                c = raw_cpu_ptr(s->cpu_slab);
2957        } while (IS_ENABLED(CONFIG_PREEMPT) &&
2958                 unlikely(tid != READ_ONCE(c->tid)));
2959
2960        /* Same with comment on barrier() in slab_alloc_node() */
2961        barrier();
2962
2963        if (likely(page == c->page)) {
2964                set_freepointer(s, tail_obj, c->freelist);
2965
2966                if (unlikely(!this_cpu_cmpxchg_double(
2967                                s->cpu_slab->freelist, s->cpu_slab->tid,
2968                                c->freelist, tid,
2969                                head, next_tid(tid)))) {
2970
2971                        note_cmpxchg_failure("slab_free", s, tid);
2972                        goto redo;
2973                }
2974                stat(s, FREE_FASTPATH);
2975        } else
2976                __slab_free(s, page, head, tail_obj, cnt, addr);
2977
2978}
2979
2980static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2981                                      void *head, void *tail, int cnt,
2982                                      unsigned long addr)
2983{
2984        /*
2985         * With KASAN enabled slab_free_freelist_hook modifies the freelist
2986         * to remove objects, whose reuse must be delayed.
2987         */
2988        if (slab_free_freelist_hook(s, &head, &tail))
2989                do_slab_free(s, page, head, tail, cnt, addr);
2990}
2991
2992#ifdef CONFIG_KASAN
2993void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
2994{
2995        do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
2996}
2997#endif
2998
2999void kmem_cache_free(struct kmem_cache *s, void *x)
3000{
3001        s = cache_from_obj(s, x);
3002        if (!s)
3003                return;
3004        slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
3005        trace_kmem_cache_free(_RET_IP_, x);
3006}
3007EXPORT_SYMBOL(kmem_cache_free);
3008
3009struct detached_freelist {
3010        struct page *page;
3011        void *tail;
3012        void *freelist;
3013        int cnt;
3014        struct kmem_cache *s;
3015};
3016
3017/*
3018 * This function progressively scans the array with free objects (with
3019 * a limited look ahead) and extract objects belonging to the same
3020 * page.  It builds a detached freelist directly within the given
3021 * page/objects.  This can happen without any need for
3022 * synchronization, because the objects are owned by running process.
3023 * The freelist is build up as a single linked list in the objects.
3024 * The idea is, that this detached freelist can then be bulk
3025 * transferred to the real freelist(s), but only requiring a single
3026 * synchronization primitive.  Look ahead in the array is limited due
3027 * to performance reasons.
3028 */
3029static inline
3030int build_detached_freelist(struct kmem_cache *s, size_t size,
3031                            void **p, struct detached_freelist *df)
3032{
3033        size_t first_skipped_index = 0;
3034        int lookahead = 3;
3035        void *object;
3036        struct page *page;
3037
3038        /* Always re-init detached_freelist */
3039        df->page = NULL;
3040
3041        do {
3042                object = p[--size];
3043                /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
3044        } while (!object && size);
3045
3046        if (!object)
3047                return 0;
3048
3049        page = virt_to_head_page(object);
3050        if (!s) {
3051                /* Handle kalloc'ed objects */
3052                if (unlikely(!PageSlab(page))) {
3053                        BUG_ON(!PageCompound(page));
3054                        kfree_hook(object);
3055                        __free_pages(page, compound_order(page));
3056                        p[size] = NULL; /* mark object processed */
3057                        return size;
3058                }
3059                /* Derive kmem_cache from object */
3060                df->s = page->slab_cache;
3061        } else {
3062                df->s = cache_from_obj(s, object); /* Support for memcg */
3063        }
3064
3065        /* Start new detached freelist */
3066        df->page = page;
3067        set_freepointer(df->s, object, NULL);
3068        df->tail = object;
3069        df->freelist = object;
3070        p[size] = NULL; /* mark object processed */
3071        df->cnt = 1;
3072
3073        while (size) {
3074                object = p[--size];
3075                if (!object)
3076                        continue; /* Skip processed objects */
3077
3078                /* df->page is always set at this point */
3079                if (df->page == virt_to_head_page(object)) {
3080                        /* Opportunity build freelist */
3081                        set_freepointer(df->s, object, df->freelist);
3082                        df->freelist = object;
3083                        df->cnt++;
3084                        p[size] = NULL; /* mark object processed */
3085
3086                        continue;
3087                }
3088
3089                /* Limit look ahead search */
3090                if (!--lookahead)
3091                        break;
3092
3093                if (!first_skipped_index)
3094                        first_skipped_index = size + 1;
3095        }
3096
3097        return first_skipped_index;
3098}
3099
3100/* Note that interrupts must be enabled when calling this function. */
3101void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3102{
3103        if (WARN_ON(!size))
3104                return;
3105
3106        do {
3107                struct detached_freelist df;
3108
3109                size = build_detached_freelist(s, size, p, &df);
3110                if (!df.page)
3111                        continue;
3112
3113                slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
3114        } while (likely(size));
3115}
3116EXPORT_SYMBOL(kmem_cache_free_bulk);
3117
3118/* Note that interrupts must be enabled when calling this function. */
3119int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3120                          void **p)
3121{
3122        struct kmem_cache_cpu *c;
3123        int i;
3124
3125        /* memcg and kmem_cache debug support */
3126        s = slab_pre_alloc_hook(s, flags);
3127        if (unlikely(!s))
3128                return false;
3129        /*
3130         * Drain objects in the per cpu slab, while disabling local
3131         * IRQs, which protects against PREEMPT and interrupts
3132         * handlers invoking normal fastpath.
3133         */
3134        local_irq_disable();
3135        c = this_cpu_ptr(s->cpu_slab);
3136
3137        for (i = 0; i < size; i++) {
3138                void *object = c->freelist;
3139
3140                if (unlikely(!object)) {
3141                        /*
3142                         * Invoking slow path likely have side-effect
3143                         * of re-populating per CPU c->freelist
3144                         */
3145                        p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
3146                                            _RET_IP_, c);
3147                        if (unlikely(!p[i]))
3148                                goto error;
3149
3150                        c = this_cpu_ptr(s->cpu_slab);
3151                        continue; /* goto for-loop */
3152                }
3153                c->freelist = get_freepointer(s, object);
3154                p[i] = object;
3155        }
3156        c->tid = next_tid(c->tid);
3157        local_irq_enable();
3158
3159        /* Clear memory outside IRQ disabled fastpath loop */
3160        if (unlikely(flags & __GFP_ZERO)) {
3161                int j;
3162
3163                for (j = 0; j < i; j++)
3164                        memset(p[j], 0, s->object_size);
3165        }
3166
3167        /* memcg and kmem_cache debug support */
3168        slab_post_alloc_hook(s, flags, size, p);
3169        return i;
3170error:
3171        local_irq_enable();
3172        slab_post_alloc_hook(s, flags, i, p);
3173        __kmem_cache_free_bulk(s, i, p);
3174        return 0;
3175}
3176EXPORT_SYMBOL(kmem_cache_alloc_bulk);
3177
3178
3179/*
3180 * Object placement in a slab is made very easy because we always start at
3181 * offset 0. If we tune the size of the object to the alignment then we can
3182 * get the required alignment by putting one properly sized object after
3183 * another.
3184 *
3185 * Notice that the allocation order determines the sizes of the per cpu
3186 * caches. Each processor has always one slab available for allocations.
3187 * Increasing the allocation order reduces the number of times that slabs
3188 * must be moved on and off the partial lists and is therefore a factor in
3189 * locking overhead.
3190 */
3191
3192/*
3193 * Mininum / Maximum order of slab pages. This influences locking overhead
3194 * and slab fragmentation. A higher order reduces the number of partial slabs
3195 * and increases the number of allocations possible without having to
3196 * take the list_lock.
3197 */
3198static unsigned int slub_min_order;
3199static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3200static unsigned int slub_min_objects;
3201
3202/*
3203 * Calculate the order of allocation given an slab object size.
3204 *
3205 * The order of allocation has significant impact on performance and other
3206 * system components. Generally order 0 allocations should be preferred since
3207 * order 0 does not cause fragmentation in the page allocator. Larger objects
3208 * be problematic to put into order 0 slabs because there may be too much
3209 * unused space left. We go to a higher order if more than 1/16th of the slab
3210 * would be wasted.
3211 *
3212 * In order to reach satisfactory performance we must ensure that a minimum
3213 * number of objects is in one slab. Otherwise we may generate too much
3214 * activity on the partial lists which requires taking the list_lock. This is
3215 * less a concern for large slabs though which are rarely used.
3216 *
3217 * slub_max_order specifies the order where we begin to stop considering the
3218 * number of objects in a slab as critical. If we reach slub_max_order then
3219 * we try to keep the page order as low as possible. So we accept more waste
3220 * of space in favor of a small page order.
3221 *
3222 * Higher order allocations also allow the placement of more objects in a
3223 * slab and thereby reduce object handling overhead. If the user has
3224 * requested a higher mininum order then we start with that one instead of
3225 * the smallest order which will fit the object.
3226 */
3227static inline unsigned int slab_order(unsigned int size,
3228                unsigned int min_objects, unsigned int max_order,
3229                unsigned int fract_leftover, unsigned int reserved)
3230{
3231        unsigned int min_order = slub_min_order;
3232        unsigned int order;
3233
3234        if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
3235                return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3236
3237        for (order = max(min_order, (unsigned int)get_order(min_objects * size + reserved));
3238                        order <= max_order; order++) {
3239
3240                unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3241                unsigned int rem;
3242
3243                rem = (slab_size - reserved) % size;
3244
3245                if (rem <= slab_size / fract_leftover)
3246                        break;
3247        }
3248
3249        return order;
3250}
3251
3252static inline int calculate_order(unsigned int size, unsigned int reserved)
3253{
3254        unsigned int order;
3255        unsigned int min_objects;
3256        unsigned int max_objects;
3257
3258        /*
3259         * Attempt to find best configuration for a slab. This
3260         * works by first attempting to generate a layout with
3261         * the best configuration and backing off gradually.
3262         *
3263         * First we increase the acceptable waste in a slab. Then
3264         * we reduce the minimum objects required in a slab.
3265         */
3266        min_objects = slub_min_objects;
3267        if (!min_objects)
3268                min_objects = 4 * (fls(nr_cpu_ids) + 1);
3269        max_objects = order_objects(slub_max_order, size, reserved);
3270        min_objects = min(min_objects, max_objects);
3271
3272        while (min_objects > 1) {
3273                unsigned int fraction;
3274
3275                fraction = 16;
3276                while (fraction >= 4) {
3277                        order = slab_order(size, min_objects,
3278                                        slub_max_order, fraction, reserved);
3279                        if (order <= slub_max_order)
3280                                return order;
3281                        fraction /= 2;
3282                }
3283                min_objects--;
3284        }
3285
3286        /*
3287         * We were unable to place multiple objects in a slab. Now
3288         * lets see if we can place a single object there.
3289         */
3290        order = slab_order(size, 1, slub_max_order, 1, reserved);
3291        if (order <= slub_max_order)
3292                return order;
3293
3294        /*
3295         * Doh this slab cannot be placed using slub_max_order.
3296         */
3297        order = slab_order(size, 1, MAX_ORDER, 1, reserved);
3298        if (order < MAX_ORDER)
3299                return order;
3300        return -ENOSYS;
3301}
3302
3303static void
3304init_kmem_cache_node(struct kmem_cache_node *n)
3305{
3306        n->nr_partial = 0;
3307        spin_lock_init(&n->list_lock);
3308        INIT_LIST_HEAD(&n->partial);
3309#ifdef CONFIG_SLUB_DEBUG
3310        atomic_long_set(&n->nr_slabs, 0);
3311        atomic_long_set(&n->total_objects, 0);
3312        INIT_LIST_HEAD(&n->full);
3313#endif
3314}
3315
3316static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
3317{
3318        BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
3319                        KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
3320
3321        /*
3322         * Must align to double word boundary for the double cmpxchg
3323         * instructions to work; see __pcpu_double_call_return_bool().
3324         */
3325        s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3326                                     2 * sizeof(void *));
3327
3328        if (!s->cpu_slab)
3329                return 0;
3330
3331        init_kmem_cache_cpus(s);
3332
3333        return 1;
3334}
3335
3336static struct kmem_cache *kmem_cache_node;
3337
3338/*
3339 * No kmalloc_node yet so do it by hand. We know that this is the first
3340 * slab on the node for this slabcache. There are no concurrent accesses
3341 * possible.
3342 *
3343 * Note that this function only works on the kmem_cache_node
3344 * when allocating for the kmem_cache_node. This is used for bootstrapping
3345 * memory on a fresh node that has no slab structures yet.
3346 */
3347static void early_kmem_cache_node_alloc(int node)
3348{
3349        struct page *page;
3350        struct kmem_cache_node *n;
3351
3352        BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
3353
3354        page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3355
3356        BUG_ON(!page);
3357        if (page_to_nid(page) != node) {
3358                pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3359                pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3360        }
3361
3362        n = page->freelist;
3363        BUG_ON(!n);
3364        page->freelist = get_freepointer(kmem_cache_node, n);
3365        page->inuse = 1;
3366        page->frozen = 0;
3367        kmem_cache_node->node[node] = n;
3368#ifdef CONFIG_SLUB_DEBUG
3369        init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
3370        init_tracking(kmem_cache_node, n);
3371#endif
3372        kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
3373                      GFP_KERNEL);
3374        init_kmem_cache_node(n);
3375        inc_slabs_node(kmem_cache_node, node, page->objects);
3376
3377        /*
3378         * No locks need to be taken here as it has just been
3379         * initialized and there is no concurrent access.
3380         */
3381        __add_partial(n, page, DEACTIVATE_TO_HEAD);
3382}
3383
3384static void free_kmem_cache_nodes(struct kmem_cache *s)
3385{
3386        int node;
3387        struct kmem_cache_node *n;
3388
3389        for_each_kmem_cache_node(s, node, n) {
3390                s->node[node] = NULL;
3391                kmem_cache_free(kmem_cache_node, n);
3392        }
3393}
3394
3395void __kmem_cache_release(struct kmem_cache *s)
3396{
3397        cache_random_seq_destroy(s);
3398        free_percpu(s->cpu_slab);
3399        free_kmem_cache_nodes(s);
3400}
3401
3402static int init_kmem_cache_nodes(struct kmem_cache *s)
3403{
3404        int node;
3405
3406        for_each_node_state(node, N_NORMAL_MEMORY) {
3407                struct kmem_cache_node *n;
3408
3409                if (slab_state == DOWN) {
3410                        early_kmem_cache_node_alloc(node);
3411                        continue;
3412                }
3413                n = kmem_cache_alloc_node(kmem_cache_node,
3414                                                GFP_KERNEL, node);
3415
3416                if (!n) {
3417                        free_kmem_cache_nodes(s);
3418                        return 0;
3419                }
3420
3421                init_kmem_cache_node(n);
3422                s->node[node] = n;
3423        }
3424        return 1;
3425}
3426
3427static void set_min_partial(struct kmem_cache *s, unsigned long min)
3428{
3429        if (min < MIN_PARTIAL)
3430                min = MIN_PARTIAL;
3431        else if (min > MAX_PARTIAL)
3432                min = MAX_PARTIAL;
3433        s->min_partial = min;
3434}
3435
3436static void set_cpu_partial(struct kmem_cache *s)
3437{
3438#ifdef CONFIG_SLUB_CPU_PARTIAL
3439        /*
3440         * cpu_partial determined the maximum number of objects kept in the
3441         * per cpu partial lists of a processor.
3442         *
3443         * Per cpu partial lists mainly contain slabs that just have one
3444         * object freed. If they are used for allocation then they can be
3445         * filled up again with minimal effort. The slab will never hit the
3446         * per node partial lists and therefore no locking will be required.
3447         *
3448         * This setting also determines
3449         *
3450         * A) The number of objects from per cpu partial slabs dumped to the
3451         *    per node list when we reach the limit.
3452         * B) The number of objects in cpu partial slabs to extract from the
3453         *    per node list when we run out of per cpu objects. We only fetch
3454         *    50% to keep some capacity around for frees.
3455         */
3456        if (!kmem_cache_has_cpu_partial(s))
3457                s->cpu_partial = 0;
3458        else if (s->size >= PAGE_SIZE)
3459                s->cpu_partial = 2;
3460        else if (s->size >= 1024)
3461                s->cpu_partial = 6;
3462        else if (s->size >= 256)
3463                s->cpu_partial = 13;
3464        else
3465                s->cpu_partial = 30;
3466#endif
3467}
3468
3469/*
3470 * calculate_sizes() determines the order and the distribution of data within
3471 * a slab object.
3472 */
3473static int calculate_sizes(struct kmem_cache *s, int forced_order)
3474{
3475        slab_flags_t flags = s->flags;
3476        unsigned int size = s->object_size;
3477        unsigned int order;
3478
3479        /*
3480         * Round up object size to the next word boundary. We can only
3481         * place the free pointer at word boundaries and this determines
3482         * the possible location of the free pointer.
3483         */
3484        size = ALIGN(size, sizeof(void *));
3485
3486#ifdef CONFIG_SLUB_DEBUG
3487        /*
3488         * Determine if we can poison the object itself. If the user of
3489         * the slab may touch the object after free or before allocation
3490         * then we should never poison the object itself.
3491         */
3492        if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
3493                        !s->ctor)
3494                s->flags |= __OBJECT_POISON;
3495        else
3496                s->flags &= ~__OBJECT_POISON;
3497
3498
3499        /*
3500         * If we are Redzoning then check if there is some space between the
3501         * end of the object and the free pointer. If not then add an
3502         * additional word to have some bytes to store Redzone information.
3503         */
3504        if ((flags & SLAB_RED_ZONE) && size == s->object_size)
3505                size += sizeof(void *);
3506#endif
3507
3508        /*
3509         * With that we have determined the number of bytes in actual use
3510         * by the object. This is the potential offset to the free pointer.
3511         */
3512        s->inuse = size;
3513
3514        if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
3515                s->ctor)) {
3516                /*
3517                 * Relocate free pointer after the object if it is not
3518                 * permitted to overwrite the first word of the object on
3519                 * kmem_cache_free.
3520                 *
3521                 * This is the case if we do RCU, have a constructor or
3522                 * destructor or are poisoning the objects.
3523                 */
3524                s->offset = size;
3525                size += sizeof(void *);
3526        }
3527
3528#ifdef CONFIG_SLUB_DEBUG
3529        if (flags & SLAB_STORE_USER)
3530                /*
3531                 * Need to store information about allocs and frees after
3532                 * the object.
3533                 */
3534                size += 2 * sizeof(struct track);
3535#endif
3536
3537        kasan_cache_create(s, &size, &s->flags);
3538#ifdef CONFIG_SLUB_DEBUG
3539        if (flags & SLAB_RED_ZONE) {
3540                /*
3541                 * Add some empty padding so that we can catch
3542                 * overwrites from earlier objects rather than let
3543                 * tracking information or the free pointer be
3544                 * corrupted if a user writes before the start
3545                 * of the object.
3546                 */
3547                size += sizeof(void *);
3548
3549                s->red_left_pad = sizeof(void *);
3550                s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3551                size += s->red_left_pad;
3552        }
3553#endif
3554
3555        /*
3556         * SLUB stores one object immediately after another beginning from
3557         * offset 0. In order to align the objects we have to simply size
3558         * each object to conform to the alignment.
3559         */
3560        size = ALIGN(size, s->align);
3561        s->size = size;
3562        if (forced_order >= 0)
3563                order = forced_order;
3564        else
3565                order = calculate_order(size, s->reserved);
3566
3567        if ((int)order < 0)
3568                return 0;
3569
3570        s->allocflags = 0;
3571        if (order)
3572                s->allocflags |= __GFP_COMP;
3573
3574        if (s->flags & SLAB_CACHE_DMA)
3575                s->allocflags |= GFP_DMA;
3576
3577        if (s->flags & SLAB_RECLAIM_ACCOUNT)
3578                s->allocflags |= __GFP_RECLAIMABLE;
3579
3580        /*
3581         * Determine the number of objects per slab
3582         */
3583        s->oo = oo_make(order, size, s->reserved);
3584        s->min = oo_make(get_order(size), size, s->reserved);
3585        if (oo_objects(s->oo) > oo_objects(s->max))
3586                s->max = s->oo;
3587
3588        return !!oo_objects(s->oo);
3589}
3590
3591static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
3592{
3593        s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
3594        s->reserved = 0;
3595#ifdef CONFIG_SLAB_FREELIST_HARDENED
3596        s->random = get_random_long();
3597#endif
3598
3599        if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
3600                s->reserved = sizeof(struct rcu_head);
3601
3602        if (!calculate_sizes(s, -1))
3603                goto error;
3604        if (disable_higher_order_debug) {
3605                /*
3606                 * Disable debugging flags that store metadata if the min slab
3607                 * order increased.
3608                 */
3609                if (get_order(s->size) > get_order(s->object_size)) {
3610                        s->flags &= ~DEBUG_METADATA_FLAGS;
3611                        s->offset = 0;
3612                        if (!calculate_sizes(s, -1))
3613                                goto error;
3614                }
3615        }
3616
3617#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3618    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3619        if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
3620                /* Enable fast mode */
3621                s->flags |= __CMPXCHG_DOUBLE;
3622#endif
3623
3624        /*
3625         * The larger the object size is, the more pages we want on the partial
3626         * list to avoid pounding the page allocator excessively.
3627         */
3628        set_min_partial(s, ilog2(s->size) / 2);
3629
3630        set_cpu_partial(s);
3631
3632#ifdef CONFIG_NUMA
3633        s->remote_node_defrag_ratio = 1000;
3634#endif
3635
3636        /* Initialize the pre-computed randomized freelist if slab is up */
3637        if (slab_state >= UP) {
3638                if (init_cache_random_seq(s))
3639                        goto error;
3640        }
3641
3642        if (!init_kmem_cache_nodes(s))
3643                goto error;
3644
3645        if (alloc_kmem_cache_cpus(s))
3646                return 0;
3647
3648        free_kmem_cache_nodes(s);
3649error:
3650        if (flags & SLAB_PANIC)
3651                panic("Cannot create slab %s size=%u realsize=%u order=%u offset=%u flags=%lx\n",
3652                      s->name, s->size, s->size,
3653                      oo_order(s->oo), s->offset, (unsigned long)flags);
3654        return -EINVAL;
3655}
3656
3657static void list_slab_objects(struct kmem_cache *s, struct page *page,
3658                                                        const char *text)
3659{
3660#ifdef CONFIG_SLUB_DEBUG
3661        void *addr = page_address(page);
3662        void *p;
3663        unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3664                                     sizeof(long), GFP_ATOMIC);
3665        if (!map)
3666                return;
3667        slab_err(s, page, text, s->name);
3668        slab_lock(page);
3669
3670        get_map(s, page, map);
3671        for_each_object(p, s, addr, page->objects) {
3672
3673                if (!test_bit(slab_index(p, s, addr), map)) {
3674                        pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
3675                        print_tracking(s, p);
3676                }
3677        }
3678        slab_unlock(page);
3679        kfree(map);
3680#endif
3681}
3682
3683/*
3684 * Attempt to free all partial slabs on a node.
3685 * This is called from __kmem_cache_shutdown(). We must take list_lock
3686 * because sysfs file might still access partial list after the shutdowning.
3687 */
3688static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3689{
3690        LIST_HEAD(discard);
3691        struct page *page, *h;
3692
3693        BUG_ON(irqs_disabled());
3694        spin_lock_irq(&n->list_lock);
3695        list_for_each_entry_safe(page, h, &n->partial, lru) {
3696                if (!page->inuse) {
3697                        remove_partial(n, page);
3698                        list_add(&page->lru, &discard);
3699                } else {
3700                        list_slab_objects(s, page,
3701                        "Objects remaining in %s on __kmem_cache_shutdown()");
3702                }
3703        }
3704        spin_unlock_irq(&n->list_lock);
3705
3706        list_for_each_entry_safe(page, h, &discard, lru)
3707                discard_slab(s, page);
3708}
3709
3710bool __kmem_cache_empty(struct kmem_cache *s)
3711{
3712        int node;
3713        struct kmem_cache_node *n;
3714
3715        for_each_kmem_cache_node(s, node, n)
3716                if (n->nr_partial || slabs_node(s, node))
3717                        return false;
3718        return true;
3719}
3720
3721/*
3722 * Release all resources used by a slab cache.
3723 */
3724int __kmem_cache_shutdown(struct kmem_cache *s)
3725{
3726        int node;
3727        struct kmem_cache_node *n;
3728
3729        flush_all(s);
3730        /* Attempt to free all objects */
3731        for_each_kmem_cache_node(s, node, n) {
3732                free_partial(s, n);
3733                if (n->nr_partial || slabs_node(s, node))
3734                        return 1;
3735        }
3736        sysfs_slab_remove(s);
3737        return 0;
3738}
3739
3740/********************************************************************
3741 *              Kmalloc subsystem
3742 *******************************************************************/
3743
3744static int __init setup_slub_min_order(char *str)
3745{
3746        get_option(&str, (int *)&slub_min_order);
3747
3748        return 1;
3749}
3750
3751__setup("slub_min_order=", setup_slub_min_order);
3752
3753static int __init setup_slub_max_order(char *str)
3754{
3755        get_option(&str, (int *)&slub_max_order);
3756        slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
3757
3758        return 1;
3759}
3760
3761__setup("slub_max_order=", setup_slub_max_order);
3762
3763static int __init setup_slub_min_objects(char *str)
3764{
3765        get_option(&str, (int *)&slub_min_objects);
3766
3767        return 1;
3768}
3769
3770__setup("slub_min_objects=", setup_slub_min_objects);
3771
3772void *__kmalloc(size_t size, gfp_t flags)
3773{
3774        struct kmem_cache *s;
3775        void *ret;
3776
3777        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3778                return kmalloc_large(size, flags);
3779
3780        s = kmalloc_slab(size, flags);
3781
3782        if (unlikely(ZERO_OR_NULL_PTR(s)))
3783                return s;
3784
3785        ret = slab_alloc(s, flags, _RET_IP_);
3786
3787        trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3788
3789        kasan_kmalloc(s, ret, size, flags);
3790
3791        return ret;
3792}
3793EXPORT_SYMBOL(__kmalloc);
3794
3795#ifdef CONFIG_NUMA
3796static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3797{
3798        struct page *page;
3799        void *ptr = NULL;
3800
3801        flags |= __GFP_COMP;
3802        page = alloc_pages_node(node, flags, get_order(size));
3803        if (page)
3804                ptr = page_address(page);
3805
3806        kmalloc_large_node_hook(ptr, size, flags);
3807        return ptr;
3808}
3809
3810void *__kmalloc_node(size_t size, gfp_t flags, int node)
3811{
3812        struct kmem_cache *s;
3813        void *ret;
3814
3815        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3816                ret = kmalloc_large_node(size, flags, node);
3817
3818                trace_kmalloc_node(_RET_IP_, ret,
3819                                   size, PAGE_SIZE << get_order(size),
3820                                   flags, node);
3821
3822                return ret;
3823        }
3824
3825        s = kmalloc_slab(size, flags);
3826
3827        if (unlikely(ZERO_OR_NULL_PTR(s)))
3828                return s;
3829
3830        ret = slab_alloc_node(s, flags, node, _RET_IP_);
3831
3832        trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3833
3834        kasan_kmalloc(s, ret, size, flags);
3835
3836        return ret;
3837}
3838EXPORT_SYMBOL(__kmalloc_node);
3839#endif
3840
3841#ifdef CONFIG_HARDENED_USERCOPY
3842/*
3843 * Rejects incorrectly sized objects and objects that are to be copied
3844 * to/from userspace but do not fall entirely within the containing slab
3845 * cache's usercopy region.
3846 *
3847 * Returns NULL if check passes, otherwise const char * to name of cache
3848 * to indicate an error.
3849 */
3850void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
3851                         bool to_user)
3852{
3853        struct kmem_cache *s;
3854        unsigned int offset;
3855        size_t object_size;
3856
3857        /* Find object and usable object size. */
3858        s = page->slab_cache;
3859
3860        /* Reject impossible pointers. */
3861        if (ptr < page_address(page))
3862                usercopy_abort("SLUB object not in SLUB page?!", NULL,
3863                               to_user, 0, n);
3864
3865        /* Find offset within object. */
3866        offset = (ptr - page_address(page)) % s->size;
3867
3868        /* Adjust for redzone and reject if within the redzone. */
3869        if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
3870                if (offset < s->red_left_pad)
3871                        usercopy_abort("SLUB object in left red zone",
3872                                       s->name, to_user, offset, n);
3873                offset -= s->red_left_pad;
3874        }
3875
3876        /* Allow address range falling entirely within usercopy region. */
3877        if (offset >= s->useroffset &&
3878            offset - s->useroffset <= s->usersize &&
3879            n <= s->useroffset - offset + s->usersize)
3880                return;
3881
3882        /*
3883         * If the copy is still within the allocated object, produce
3884         * a warning instead of rejecting the copy. This is intended
3885         * to be a temporary method to find any missing usercopy
3886         * whitelists.
3887         */
3888        object_size = slab_ksize(s);
3889        if (usercopy_fallback &&
3890            offset <= object_size && n <= object_size - offset) {
3891                usercopy_warn("SLUB object", s->name, to_user, offset, n);
3892                return;
3893        }
3894
3895        usercopy_abort("SLUB object", s->name, to_user, offset, n);
3896}
3897#endif /* CONFIG_HARDENED_USERCOPY */
3898
3899static size_t __ksize(const void *object)
3900{
3901        struct page *page;
3902
3903        if (unlikely(object == ZERO_SIZE_PTR))
3904                return 0;
3905
3906        page = virt_to_head_page(object);
3907
3908        if (unlikely(!PageSlab(page))) {
3909                WARN_ON(!PageCompound(page));
3910                return PAGE_SIZE << compound_order(page);
3911        }
3912
3913        return slab_ksize(page->slab_cache);
3914}
3915
3916size_t ksize(const void *object)
3917{
3918        size_t size = __ksize(object);
3919        /* We assume that ksize callers could use whole allocated area,
3920         * so we need to unpoison this area.
3921         */
3922        kasan_unpoison_shadow(object, size);
3923        return size;
3924}
3925EXPORT_SYMBOL(ksize);
3926
3927void kfree(const void *x)
3928{
3929        struct page *page;
3930        void *object = (void *)x;
3931
3932        trace_kfree(_RET_IP_, x);
3933
3934        if (unlikely(ZERO_OR_NULL_PTR(x)))
3935                return;
3936
3937        page = virt_to_head_page(x);
3938        if (unlikely(!PageSlab(page))) {
3939                BUG_ON(!PageCompound(page));
3940                kfree_hook(object);
3941                __free_pages(page, compound_order(page));
3942                return;
3943        }
3944        slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
3945}
3946EXPORT_SYMBOL(kfree);
3947
3948#define SHRINK_PROMOTE_MAX 32
3949
3950/*
3951 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
3952 * up most to the head of the partial lists. New allocations will then
3953 * fill those up and thus they can be removed from the partial lists.
3954 *
3955 * The slabs with the least items are placed last. This results in them
3956 * being allocated from last increasing the chance that the last objects
3957 * are freed in them.
3958 */
3959int __kmem_cache_shrink(struct kmem_cache *s)
3960{
3961        int node;
3962        int i;
3963        struct kmem_cache_node *n;
3964        struct page *page;
3965        struct page *t;
3966        struct list_head discard;
3967        struct list_head promote[SHRINK_PROMOTE_MAX];
3968        unsigned long flags;
3969        int ret = 0;
3970
3971        flush_all(s);
3972        for_each_kmem_cache_node(s, node, n) {
3973                INIT_LIST_HEAD(&discard);
3974                for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
3975                        INIT_LIST_HEAD(promote + i);
3976
3977                spin_lock_irqsave(&n->list_lock, flags);
3978
3979                /*
3980                 * Build lists of slabs to discard or promote.
3981                 *
3982                 * Note that concurrent frees may occur while we hold the
3983                 * list_lock. page->inuse here is the upper limit.
3984                 */
3985                list_for_each_entry_safe(page, t, &n->partial, lru) {
3986                        int free = page->objects - page->inuse;
3987
3988                        /* Do not reread page->inuse */
3989                        barrier();
3990
3991                        /* We do not keep full slabs on the list */
3992                        BUG_ON(free <= 0);
3993
3994                        if (free == page->objects) {
3995                                list_move(&page->lru, &discard);
3996                                n->nr_partial--;
3997                        } else if (free <= SHRINK_PROMOTE_MAX)
3998                                list_move(&page->lru, promote + free - 1);
3999                }
4000
4001                /*
4002                 * Promote the slabs filled up most to the head of the
4003                 * partial list.
4004                 */
4005                for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4006                        list_splice(promote + i, &n->partial);
4007
4008                spin_unlock_irqrestore(&n->list_lock, flags);
4009
4010                /* Release empty slabs */
4011                list_for_each_entry_safe(page, t, &discard, lru)
4012                        discard_slab(s, page);
4013
4014                if (slabs_node(s, node))
4015                        ret = 1;
4016        }
4017
4018        return ret;
4019}
4020
4021#ifdef CONFIG_MEMCG
4022static void kmemcg_cache_deact_after_rcu(struct kmem_cache *s)
4023{
4024        /*
4025         * Called with all the locks held after a sched RCU grace period.
4026         * Even if @s becomes empty after shrinking, we can't know that @s
4027         * doesn't have allocations already in-flight and thus can't
4028         * destroy @s until the associated memcg is released.
4029         *
4030         * However, let's remove the sysfs files for empty caches here.
4031         * Each cache has a lot of interface files which aren't
4032         * particularly useful for empty draining caches; otherwise, we can
4033         * easily end up with millions of unnecessary sysfs files on
4034         * systems which have a lot of memory and transient cgroups.
4035         */
4036        if (!__kmem_cache_shrink(s))
4037                sysfs_slab_remove(s);
4038}
4039
4040void __kmemcg_cache_deactivate(struct kmem_cache *s)
4041{
4042        /*
4043         * Disable empty slabs caching. Used to avoid pinning offline
4044         * memory cgroups by kmem pages that can be freed.
4045         */
4046        slub_set_cpu_partial(s, 0);
4047        s->min_partial = 0;
4048
4049        /*
4050         * s->cpu_partial is checked locklessly (see put_cpu_partial), so
4051         * we have to make sure the change is visible before shrinking.
4052         */
4053        slab_deactivate_memcg_cache_rcu_sched(s, kmemcg_cache_deact_after_rcu);
4054}
4055#endif
4056
4057static int slab_mem_going_offline_callback(void *arg)
4058{
4059        struct kmem_cache *s;
4060
4061        mutex_lock(&slab_mutex);
4062        list_for_each_entry(s, &slab_caches, list)
4063                __kmem_cache_shrink(s);
4064        mutex_unlock(&slab_mutex);
4065
4066        return 0;
4067}
4068
4069static void slab_mem_offline_callback(void *arg)
4070{
4071        struct kmem_cache_node *n;
4072        struct kmem_cache *s;
4073        struct memory_notify *marg = arg;
4074        int offline_node;
4075
4076        offline_node = marg->status_change_nid_normal;
4077
4078        /*
4079         * If the node still has available memory. we need kmem_cache_node
4080         * for it yet.
4081         */
4082        if (offline_node < 0)
4083                return;
4084
4085        mutex_lock(&slab_mutex);
4086        list_for_each_entry(s, &slab_caches, list) {
4087                n = get_node(s, offline_node);
4088                if (n) {
4089                        /*
4090                         * if n->nr_slabs > 0, slabs still exist on the node
4091                         * that is going down. We were unable to free them,
4092                         * and offline_pages() function shouldn't call this
4093                         * callback. So, we must fail.
4094                         */
4095                        BUG_ON(slabs_node(s, offline_node));
4096
4097                        s->node[offline_node] = NULL;
4098                        kmem_cache_free(kmem_cache_node, n);
4099                }
4100        }
4101        mutex_unlock(&slab_mutex);
4102}
4103
4104static int slab_mem_going_online_callback(void *arg)
4105{
4106        struct kmem_cache_node *n;
4107        struct kmem_cache *s;
4108        struct memory_notify *marg = arg;
4109        int nid = marg->status_change_nid_normal;
4110        int ret = 0;
4111
4112        /*
4113         * If the node's memory is already available, then kmem_cache_node is
4114         * already created. Nothing to do.
4115         */
4116        if (nid < 0)
4117                return 0;
4118
4119        /*
4120         * We are bringing a node online. No memory is available yet. We must
4121         * allocate a kmem_cache_node structure in order to bring the node
4122         * online.
4123         */
4124        mutex_lock(&slab_mutex);
4125        list_for_each_entry(s, &slab_caches, list) {
4126                /*
4127                 * XXX: kmem_cache_alloc_node will fallback to other nodes
4128                 *      since memory is not yet available from the node that
4129                 *      is brought up.
4130                 */
4131                n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4132                if (!n) {
4133                        ret = -ENOMEM;
4134                        goto out;
4135                }
4136                init_kmem_cache_node(n);
4137                s->node[nid] = n;
4138        }
4139out:
4140        mutex_unlock(&slab_mutex);
4141        return ret;
4142}
4143
4144static int slab_memory_callback(struct notifier_block *self,
4145                                unsigned long action, void *arg)
4146{
4147        int ret = 0;
4148
4149        switch (action) {
4150        case MEM_GOING_ONLINE:
4151                ret = slab_mem_going_online_callback(arg);
4152                break;
4153        case MEM_GOING_OFFLINE:
4154                ret = slab_mem_going_offline_callback(arg);
4155                break;
4156        case MEM_OFFLINE:
4157        case MEM_CANCEL_ONLINE:
4158                slab_mem_offline_callback(arg);
4159                break;
4160        case MEM_ONLINE:
4161        case MEM_CANCEL_OFFLINE:
4162                break;
4163        }
4164        if (ret)
4165                ret = notifier_from_errno(ret);
4166        else
4167                ret = NOTIFY_OK;
4168        return ret;
4169}
4170
4171static struct notifier_block slab_memory_callback_nb = {
4172        .notifier_call = slab_memory_callback,
4173        .priority = SLAB_CALLBACK_PRI,
4174};
4175
4176/********************************************************************
4177 *                      Basic setup of slabs
4178 *******************************************************************/
4179
4180/*
4181 * Used for early kmem_cache structures that were allocated using
4182 * the page allocator. Allocate them properly then fix up the pointers
4183 * that may be pointing to the wrong kmem_cache structure.
4184 */
4185
4186static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
4187{
4188        int node;
4189        struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
4190        struct kmem_cache_node *n;
4191
4192        memcpy(s, static_cache, kmem_cache->object_size);
4193
4194        /*
4195         * This runs very early, and only the boot processor is supposed to be
4196         * up.  Even if it weren't true, IRQs are not up so we couldn't fire
4197         * IPIs around.
4198         */
4199        __flush_cpu_slab(s, smp_processor_id());
4200        for_each_kmem_cache_node(s, node, n) {
4201                struct page *p;
4202
4203                list_for_each_entry(p, &n->partial, lru)
4204                        p->slab_cache = s;
4205
4206#ifdef CONFIG_SLUB_DEBUG
4207                list_for_each_entry(p, &n->full, lru)
4208                        p->slab_cache = s;
4209#endif
4210        }
4211        slab_init_memcg_params(s);
4212        list_add(&s->list, &slab_caches);
4213        memcg_link_cache(s);
4214        return s;
4215}
4216
4217void __init kmem_cache_init(void)
4218{
4219        static __initdata struct kmem_cache boot_kmem_cache,
4220                boot_kmem_cache_node;
4221
4222        if (debug_guardpage_minorder())
4223                slub_max_order = 0;
4224
4225        kmem_cache_node = &boot_kmem_cache_node;
4226        kmem_cache = &boot_kmem_cache;
4227
4228        create_boot_cache(kmem_cache_node, "kmem_cache_node",
4229                sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
4230
4231        register_hotmemory_notifier(&slab_memory_callback_nb);
4232
4233        /* Able to allocate the per node structures */
4234        slab_state = PARTIAL;
4235
4236        create_boot_cache(kmem_cache, "kmem_cache",
4237                        offsetof(struct kmem_cache, node) +
4238                                nr_node_ids * sizeof(struct kmem_cache_node *),
4239                       SLAB_HWCACHE_ALIGN, 0, 0);
4240
4241        kmem_cache = bootstrap(&boot_kmem_cache);
4242
4243        /*
4244         * Allocate kmem_cache_node properly from the kmem_cache slab.
4245         * kmem_cache_node is separately allocated so no need to
4246         * update any list pointers.
4247         */
4248        kmem_cache_node = bootstrap(&boot_kmem_cache_node);
4249
4250        /* Now we can use the kmem_cache to allocate kmalloc slabs */
4251        setup_kmalloc_cache_index_table();
4252        create_kmalloc_caches(0);
4253
4254        /* Setup random freelists for each cache */
4255        init_freelist_randomization();
4256
4257        cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4258                                  slub_cpu_dead);
4259
4260        pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
4261                cache_line_size(),
4262                slub_min_order, slub_max_order, slub_min_objects,
4263                nr_cpu_ids, nr_node_ids);
4264}
4265
4266void __init kmem_cache_init_late(void)
4267{
4268}
4269
4270struct kmem_cache *
4271__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
4272                   slab_flags_t flags, void (*ctor)(void *))
4273{
4274        struct kmem_cache *s, *c;
4275
4276        s = find_mergeable(size, align, flags, name, ctor);
4277        if (s) {
4278                s->refcount++;
4279
4280                /*
4281                 * Adjust the object sizes so that we clear
4282                 * the complete object on kzalloc.
4283                 */
4284                s->object_size = max(s->object_size, size);
4285                s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
4286
4287                for_each_memcg_cache(c, s) {
4288                        c->object_size = s->object_size;
4289                        c->inuse = max(c->inuse, ALIGN(size, sizeof(void *)));
4290                }
4291
4292                if (sysfs_slab_alias(s, name)) {
4293                        s->refcount--;
4294                        s = NULL;
4295                }
4296        }
4297
4298        return s;
4299}
4300
4301int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
4302{
4303        int err;
4304
4305        err = kmem_cache_open(s, flags);
4306        if (err)
4307                return err;
4308
4309        /* Mutex is not taken during early boot */
4310        if (slab_state <= UP)
4311                return 0;
4312
4313        memcg_propagate_slab_attrs(s);
4314        err = sysfs_slab_add(s);
4315        if (err)
4316                __kmem_cache_release(s);
4317
4318        return err;
4319}
4320
4321void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4322{
4323        struct kmem_cache *s;
4324        void *ret;
4325
4326        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4327                return kmalloc_large(size, gfpflags);
4328
4329        s = kmalloc_slab(size, gfpflags);
4330
4331        if (unlikely(ZERO_OR_NULL_PTR(s)))
4332                return s;
4333
4334        ret = slab_alloc(s, gfpflags, caller);
4335
4336        /* Honor the call site pointer we received. */
4337        trace_kmalloc(caller, ret, size, s->size, gfpflags);
4338
4339        return ret;
4340}
4341
4342#ifdef CONFIG_NUMA
4343void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4344                                        int node, unsigned long caller)
4345{
4346        struct kmem_cache *s;
4347        void *ret;
4348
4349        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4350                ret = kmalloc_large_node(size, gfpflags, node);
4351
4352                trace_kmalloc_node(caller, ret,
4353                                   size, PAGE_SIZE << get_order(size),
4354                                   gfpflags, node);
4355
4356                return ret;
4357        }
4358
4359        s = kmalloc_slab(size, gfpflags);
4360
4361        if (unlikely(ZERO_OR_NULL_PTR(s)))
4362                return s;
4363
4364        ret = slab_alloc_node(s, gfpflags, node, caller);
4365
4366        /* Honor the call site pointer we received. */
4367        trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4368
4369        return ret;
4370}
4371#endif
4372
4373#ifdef CONFIG_SYSFS
4374static int count_inuse(struct page *page)
4375{
4376        return page->inuse;
4377}
4378
4379static int count_total(struct page *page)
4380{
4381        return page->objects;
4382}
4383#endif
4384
4385#ifdef CONFIG_SLUB_DEBUG
4386static int validate_slab(struct kmem_cache *s, struct page *page,
4387                                                unsigned long *map)
4388{
4389        void *p;
4390        void *addr = page_address(page);
4391
4392        if (!check_slab(s, page) ||
4393                        !on_freelist(s, page, NULL))
4394                return 0;
4395
4396        /* Now we know that a valid freelist exists */
4397        bitmap_zero(map, page->objects);
4398
4399        get_map(s, page, map);
4400        for_each_object(p, s, addr, page->objects) {
4401                if (test_bit(slab_index(p, s, addr), map))
4402                        if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4403                                return 0;
4404        }
4405
4406        for_each_object(p, s, addr, page->objects)
4407                if (!test_bit(slab_index(p, s, addr), map))
4408                        if (!check_object(s, page, p, SLUB_RED_ACTIVE))
4409                                return 0;
4410        return 1;
4411}
4412
4413static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4414                                                unsigned long *map)
4415{
4416        slab_lock(page);
4417        validate_slab(s, page, map);
4418        slab_unlock(page);
4419}
4420
4421static int validate_slab_node(struct kmem_cache *s,
4422                struct kmem_cache_node *n, unsigned long *map)
4423{
4424        unsigned long count = 0;
4425        struct page *page;
4426        unsigned long flags;
4427
4428        spin_lock_irqsave(&n->list_lock, flags);
4429
4430        list_for_each_entry(page, &n->partial, lru) {
4431                validate_slab_slab(s, page, map);
4432                count++;
4433        }
4434        if (count != n->nr_partial)
4435                pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4436                       s->name, count, n->nr_partial);
4437
4438        if (!(s->flags & SLAB_STORE_USER))
4439                goto out;
4440
4441        list_for_each_entry(page, &n->full, lru) {
4442                validate_slab_slab(s, page, map);
4443                count++;
4444        }
4445        if (count != atomic_long_read(&n->nr_slabs))
4446                pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4447                       s->name, count, atomic_long_read(&n->nr_slabs));
4448
4449out:
4450        spin_unlock_irqrestore(&n->list_lock, flags);
4451        return count;
4452}
4453
4454static long validate_slab_cache(struct kmem_cache *s)
4455{
4456        int node;
4457        unsigned long count = 0;
4458        unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4459                                sizeof(unsigned long), GFP_KERNEL);
4460        struct kmem_cache_node *n;
4461
4462        if (!map)
4463                return -ENOMEM;
4464
4465        flush_all(s);
4466        for_each_kmem_cache_node(s, node, n)
4467                count += validate_slab_node(s, n, map);
4468        kfree(map);
4469        return count;
4470}
4471/*
4472 * Generate lists of code addresses where slabcache objects are allocated
4473 * and freed.
4474 */
4475
4476struct location {
4477        unsigned long count;
4478        unsigned long addr;
4479        long long sum_time;
4480        long min_time;
4481        long max_time;
4482        long min_pid;
4483        long max_pid;
4484        DECLARE_BITMAP(cpus, NR_CPUS);
4485        nodemask_t nodes;
4486};
4487
4488struct loc_track {
4489        unsigned long max;
4490        unsigned long count;
4491        struct location *loc;
4492};
4493
4494static void free_loc_track(struct loc_track *t)
4495{
4496        if (t->max)
4497                free_pages((unsigned long)t->loc,
4498                        get_order(sizeof(struct location) * t->max));
4499}
4500
4501static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4502{
4503        struct location *l;
4504        int order;
4505
4506        order = get_order(sizeof(struct location) * max);
4507
4508        l = (void *)__get_free_pages(flags, order);
4509        if (!l)
4510                return 0;
4511
4512        if (t->count) {
4513                memcpy(l, t->loc, sizeof(struct location) * t->count);
4514                free_loc_track(t);
4515        }
4516        t->max = max;
4517        t->loc = l;
4518        return 1;
4519}
4520
4521static int add_location(struct loc_track *t, struct kmem_cache *s,
4522                                const struct track *track)
4523{
4524        long start, end, pos;
4525        struct location *l;
4526        unsigned long caddr;
4527        unsigned long age = jiffies - track->when;
4528
4529        start = -1;
4530        end = t->count;
4531
4532        for ( ; ; ) {
4533                pos = start + (end - start + 1) / 2;
4534
4535                /*
4536                 * There is nothing at "end". If we end up there
4537                 * we need to add something to before end.
4538                 */
4539                if (pos == end)
4540                        break;
4541
4542                caddr = t->loc[pos].addr;
4543                if (track->addr == caddr) {
4544
4545                        l = &t->loc[pos];
4546                        l->count++;
4547                        if (track->when) {
4548                                l->sum_time += age;
4549                                if (age < l->min_time)
4550                                        l->min_time = age;
4551                                if (age > l->max_time)
4552                                        l->max_time = age;
4553
4554                                if (track->pid < l->min_pid)
4555                                        l->min_pid = track->pid;
4556                                if (track->pid > l->max_pid)
4557                                        l->max_pid = track->pid;
4558
4559                                cpumask_set_cpu(track->cpu,
4560                                                to_cpumask(l->cpus));
4561                        }
4562                        node_set(page_to_nid(virt_to_page(track)), l->nodes);
4563                        return 1;
4564                }
4565
4566                if (track->addr < caddr)
4567                        end = pos;
4568                else
4569                        start = pos;
4570        }
4571
4572        /*
4573         * Not found. Insert new tracking element.
4574         */
4575        if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4576                return 0;
4577
4578        l = t->loc + pos;
4579        if (pos < t->count)
4580                memmove(l + 1, l,
4581                        (t->count - pos) * sizeof(struct location));
4582        t->count++;
4583        l->count = 1;
4584        l->addr = track->addr;
4585        l->sum_time = age;
4586        l->min_time = age;
4587        l->max_time = age;
4588        l->min_pid = track->pid;
4589        l->max_pid = track->pid;
4590        cpumask_clear(to_cpumask(l->cpus));
4591        cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4592        nodes_clear(l->nodes);
4593        node_set(page_to_nid(virt_to_page(track)), l->nodes);
4594        return 1;
4595}
4596
4597static void process_slab(struct loc_track *t, struct kmem_cache *s,
4598                struct page *page, enum track_item alloc,
4599                unsigned long *map)
4600{
4601        void *addr = page_address(page);
4602        void *p;
4603
4604        bitmap_zero(map, page->objects);
4605        get_map(s, page, map);
4606
4607        for_each_object(p, s, addr, page->objects)
4608                if (!test_bit(slab_index(p, s, addr), map))
4609                        add_location(t, s, get_track(s, p, alloc));
4610}
4611
4612static int list_locations(struct kmem_cache *s, char *buf,
4613                                        enum track_item alloc)
4614{
4615        int len = 0;
4616        unsigned long i;
4617        struct loc_track t = { 0, 0, NULL };
4618        int node;
4619        unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4620                                     sizeof(unsigned long), GFP_KERNEL);
4621        struct kmem_cache_node *n;
4622
4623        if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4624                                     GFP_KERNEL)) {
4625                kfree(map);
4626                return sprintf(buf, "Out of memory\n");
4627        }
4628        /* Push back cpu slabs */
4629        flush_all(s);
4630
4631        for_each_kmem_cache_node(s, node, n) {
4632                unsigned long flags;
4633                struct page *page;
4634
4635                if (!atomic_long_read(&n->nr_slabs))
4636                        continue;
4637
4638                spin_lock_irqsave(&n->list_lock, flags);
4639                list_for_each_entry(page, &n->partial, lru)
4640                        process_slab(&t, s, page, alloc, map);
4641                list_for_each_entry(page, &n->full, lru)
4642                        process_slab(&t, s, page, alloc, map);
4643                spin_unlock_irqrestore(&n->list_lock, flags);
4644        }
4645
4646        for (i = 0; i < t.count; i++) {
4647                struct location *l = &t.loc[i];
4648
4649                if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4650                        break;
4651                len += sprintf(buf + len, "%7ld ", l->count);
4652
4653                if (l->addr)
4654                        len += sprintf(buf + len, "%pS", (void *)l->addr);
4655                else
4656                        len += sprintf(buf + len, "<not-available>");
4657
4658                if (l->sum_time != l->min_time) {
4659                        len += sprintf(buf + len, " age=%ld/%ld/%ld",
4660                                l->min_time,
4661                                (long)div_u64(l->sum_time, l->count),
4662                                l->max_time);
4663                } else
4664                        len += sprintf(buf + len, " age=%ld",
4665                                l->min_time);
4666
4667                if (l->min_pid != l->max_pid)
4668                        len += sprintf(buf + len, " pid=%ld-%ld",
4669                                l->min_pid, l->max_pid);
4670                else
4671                        len += sprintf(buf + len, " pid=%ld",
4672                                l->min_pid);
4673
4674                if (num_online_cpus() > 1 &&
4675                                !cpumask_empty(to_cpumask(l->cpus)) &&
4676                                len < PAGE_SIZE - 60)
4677                        len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4678                                         " cpus=%*pbl",
4679                                         cpumask_pr_args(to_cpumask(l->cpus)));
4680
4681                if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4682                                len < PAGE_SIZE - 60)
4683                        len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4684                                         " nodes=%*pbl",
4685                                         nodemask_pr_args(&l->nodes));
4686
4687                len += sprintf(buf + len, "\n");
4688        }
4689
4690        free_loc_track(&t);
4691        kfree(map);
4692        if (!t.count)
4693                len += sprintf(buf, "No data\n");
4694        return len;
4695}
4696#endif
4697
4698#ifdef SLUB_RESILIENCY_TEST
4699static void __init resiliency_test(void)
4700{
4701        u8 *p;
4702
4703        BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
4704
4705        pr_err("SLUB resiliency testing\n");
4706        pr_err("-----------------------\n");
4707        pr_err("A. Corruption after allocation\n");
4708
4709        p = kzalloc(16, GFP_KERNEL);
4710        p[16] = 0x12;
4711        pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4712               p + 16);
4713
4714        validate_slab_cache(kmalloc_caches[4]);
4715
4716        /* Hmmm... The next two are dangerous */
4717        p = kzalloc(32, GFP_KERNEL);
4718        p[32 + sizeof(void *)] = 0x34;
4719        pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
4720               p);
4721        pr_err("If allocated object is overwritten then not detectable\n\n");
4722
4723        validate_slab_cache(kmalloc_caches[5]);
4724        p = kzalloc(64, GFP_KERNEL);
4725        p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4726        *p = 0x56;
4727        pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4728               p);
4729        pr_err("If allocated object is overwritten then not detectable\n\n");
4730        validate_slab_cache(kmalloc_caches[6]);
4731
4732        pr_err("\nB. Corruption after free\n");
4733        p = kzalloc(128, GFP_KERNEL);
4734        kfree(p);
4735        *p = 0x78;
4736        pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4737        validate_slab_cache(kmalloc_caches[7]);
4738
4739        p = kzalloc(256, GFP_KERNEL);
4740        kfree(p);
4741        p[50] = 0x9a;
4742        pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
4743        validate_slab_cache(kmalloc_caches[8]);
4744
4745        p = kzalloc(512, GFP_KERNEL);
4746        kfree(p);
4747        p[512] = 0xab;
4748        pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4749        validate_slab_cache(kmalloc_caches[9]);
4750}
4751#else
4752#ifdef CONFIG_SYSFS
4753static void resiliency_test(void) {};
4754#endif
4755#endif
4756
4757#ifdef CONFIG_SYSFS
4758enum slab_stat_type {
4759        SL_ALL,                 /* All slabs */
4760        SL_PARTIAL,             /* Only partially allocated slabs */
4761        SL_CPU,                 /* Only slabs used for cpu caches */
4762        SL_OBJECTS,             /* Determine allocated objects not slabs */
4763        SL_TOTAL                /* Determine object capacity not slabs */
4764};
4765
4766#define SO_ALL          (1 << SL_ALL)
4767#define SO_PARTIAL      (1 << SL_PARTIAL)
4768#define SO_CPU          (1 << SL_CPU)
4769#define SO_OBJECTS      (1 << SL_OBJECTS)
4770#define SO_TOTAL        (1 << SL_TOTAL)
4771
4772#ifdef CONFIG_MEMCG
4773static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
4774
4775static int __init setup_slub_memcg_sysfs(char *str)
4776{
4777        int v;
4778
4779        if (get_option(&str, &v) > 0)
4780                memcg_sysfs_enabled = v;
4781
4782        return 1;
4783}
4784
4785__setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
4786#endif
4787
4788static ssize_t show_slab_objects(struct kmem_cache *s,
4789                            char *buf, unsigned long flags)
4790{
4791        unsigned long total = 0;
4792        int node;
4793        int x;
4794        unsigned long *nodes;
4795
4796        nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
4797        if (!nodes)
4798                return -ENOMEM;
4799
4800        if (flags & SO_CPU) {
4801                int cpu;
4802
4803                for_each_possible_cpu(cpu) {
4804                        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4805                                                               cpu);
4806                        int node;
4807                        struct page *page;
4808
4809                        page = READ_ONCE(c->page);
4810                        if (!page)
4811                                continue;
4812
4813                        node = page_to_nid(page);
4814                        if (flags & SO_TOTAL)
4815                                x = page->objects;
4816                        else if (flags & SO_OBJECTS)
4817                                x = page->inuse;
4818                        else
4819                                x = 1;
4820
4821                        total += x;
4822                        nodes[node] += x;
4823
4824                        page = slub_percpu_partial_read_once(c);
4825                        if (page) {
4826                                node = page_to_nid(page);
4827                                if (flags & SO_TOTAL)
4828                                        WARN_ON_ONCE(1);
4829                                else if (flags & SO_OBJECTS)
4830                                        WARN_ON_ONCE(1);
4831                                else
4832                                        x = page->pages;
4833                                total += x;
4834                                nodes[node] += x;
4835                        }
4836                }
4837        }
4838
4839        get_online_mems();
4840#ifdef CONFIG_SLUB_DEBUG
4841        if (flags & SO_ALL) {
4842                struct kmem_cache_node *n;
4843
4844                for_each_kmem_cache_node(s, node, n) {
4845
4846                        if (flags & SO_TOTAL)
4847                                x = atomic_long_read(&n->total_objects);
4848                        else if (flags & SO_OBJECTS)
4849                                x = atomic_long_read(&n->total_objects) -
4850                                        count_partial(n, count_free);
4851                        else
4852                                x = atomic_long_read(&n->nr_slabs);
4853                        total += x;
4854                        nodes[node] += x;
4855                }
4856
4857        } else
4858#endif
4859        if (flags & SO_PARTIAL) {
4860                struct kmem_cache_node *n;
4861
4862                for_each_kmem_cache_node(s, node, n) {
4863                        if (flags & SO_TOTAL)
4864                                x = count_partial(n, count_total);
4865                        else if (flags & SO_OBJECTS)
4866                                x = count_partial(n, count_inuse);
4867                        else
4868                                x = n->nr_partial;
4869                        total += x;
4870                        nodes[node] += x;
4871                }
4872        }
4873        x = sprintf(buf, "%lu", total);
4874#ifdef CONFIG_NUMA
4875        for (node = 0; node < nr_node_ids; node++)
4876                if (nodes[node])
4877                        x += sprintf(buf + x, " N%d=%lu",
4878                                        node, nodes[node]);
4879#endif
4880        put_online_mems();
4881        kfree(nodes);
4882        return x + sprintf(buf + x, "\n");
4883}
4884
4885#ifdef CONFIG_SLUB_DEBUG
4886static int any_slab_objects(struct kmem_cache *s)
4887{
4888        int node;
4889        struct kmem_cache_node *n;
4890
4891        for_each_kmem_cache_node(s, node, n)
4892                if (atomic_long_read(&n->total_objects))
4893                        return 1;
4894
4895        return 0;
4896}
4897#endif
4898
4899#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4900#define to_slab(n) container_of(n, struct kmem_cache, kobj)
4901
4902struct slab_attribute {
4903        struct attribute attr;
4904        ssize_t (*show)(struct kmem_cache *s, char *buf);
4905        ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4906};
4907
4908#define SLAB_ATTR_RO(_name) \
4909        static struct slab_attribute _name##_attr = \
4910        __ATTR(_name, 0400, _name##_show, NULL)
4911
4912#define SLAB_ATTR(_name) \
4913        static struct slab_attribute _name##_attr =  \
4914        __ATTR(_name, 0600, _name##_show, _name##_store)
4915
4916static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4917{
4918        return sprintf(buf, "%u\n", s->size);
4919}
4920SLAB_ATTR_RO(slab_size);
4921
4922static ssize_t align_show(struct kmem_cache *s, char *buf)
4923{
4924        return sprintf(buf, "%u\n", s->align);
4925}
4926SLAB_ATTR_RO(align);
4927
4928static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4929{
4930        return sprintf(buf, "%u\n", s->object_size);
4931}
4932SLAB_ATTR_RO(object_size);
4933
4934static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4935{
4936        return sprintf(buf, "%u\n", oo_objects(s->oo));
4937}
4938SLAB_ATTR_RO(objs_per_slab);
4939
4940static ssize_t order_store(struct kmem_cache *s,
4941                                const char *buf, size_t length)
4942{
4943        unsigned int order;
4944        int err;
4945
4946        err = kstrtouint(buf, 10, &order);
4947        if (err)
4948                return err;
4949
4950        if (order > slub_max_order || order < slub_min_order)
4951                return -EINVAL;
4952
4953        calculate_sizes(s, order);
4954        return length;
4955}
4956
4957static ssize_t order_show(struct kmem_cache *s, char *buf)
4958{
4959        return sprintf(buf, "%u\n", oo_order(s->oo));
4960}
4961SLAB_ATTR(order);
4962
4963static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4964{
4965        return sprintf(buf, "%lu\n", s->min_partial);
4966}
4967
4968static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4969                                 size_t length)
4970{
4971        unsigned long min;
4972        int err;
4973
4974        err = kstrtoul(buf, 10, &min);
4975        if (err)
4976                return err;
4977
4978        set_min_partial(s, min);
4979        return length;
4980}
4981SLAB_ATTR(min_partial);
4982
4983static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4984{
4985        return sprintf(buf, "%u\n", slub_cpu_partial(s));
4986}
4987
4988static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4989                                 size_t length)
4990{
4991        unsigned int objects;
4992        int err;
4993
4994        err = kstrtouint(buf, 10, &objects);
4995        if (err)
4996                return err;
4997        if (objects && !kmem_cache_has_cpu_partial(s))
4998                return -EINVAL;
4999
5000        slub_set_cpu_partial(s, objects);
5001        flush_all(s);
5002        return length;
5003}
5004SLAB_ATTR(cpu_partial);
5005
5006static ssize_t ctor_show(struct kmem_cache *s, char *buf)
5007{
5008        if (!s->ctor)
5009                return 0;
5010        return sprintf(buf, "%pS\n", s->ctor);
5011}
5012SLAB_ATTR_RO(ctor);
5013
5014static ssize_t aliases_show(struct kmem_cache *s, char *buf)
5015{
5016        return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
5017}
5018SLAB_ATTR_RO(aliases);
5019
5020static ssize_t partial_show(struct kmem_cache *s, char *buf)
5021{
5022        return show_slab_objects(s, buf, SO_PARTIAL);
5023}
5024SLAB_ATTR_RO(partial);
5025
5026static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5027{
5028        return show_slab_objects(s, buf, SO_CPU);
5029}
5030SLAB_ATTR_RO(cpu_slabs);
5031
5032static ssize_t objects_show(struct kmem_cache *s, char *buf)
5033{
5034        return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
5035}
5036SLAB_ATTR_RO(objects);
5037
5038static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5039{
5040        return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5041}
5042SLAB_ATTR_RO(objects_partial);
5043
5044static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5045{
5046        int objects = 0;
5047        int pages = 0;
5048        int cpu;
5049        int len;
5050
5051        for_each_online_cpu(cpu) {
5052                struct page *page;
5053
5054                page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5055
5056                if (page) {
5057                        pages += page->pages;
5058                        objects += page->pobjects;
5059                }
5060        }
5061
5062        len = sprintf(buf, "%d(%d)", objects, pages);
5063
5064#ifdef CONFIG_SMP
5065        for_each_online_cpu(cpu) {
5066                struct page *page;
5067
5068                page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5069
5070                if (page && len < PAGE_SIZE - 20)
5071                        len += sprintf(buf + len, " C%d=%d(%d)", cpu,
5072                                page->pobjects, page->pages);
5073        }
5074#endif
5075        return len + sprintf(buf + len, "\n");
5076}
5077SLAB_ATTR_RO(slabs_cpu_partial);
5078
5079static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5080{
5081        return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
5082}
5083
5084static ssize_t reclaim_account_store(struct kmem_cache *s,
5085                                const char *buf, size_t length)
5086{
5087        s->flags &= ~SLAB_RECLAIM_ACCOUNT;
5088        if (buf[0] == '1')
5089                s->flags |= SLAB_RECLAIM_ACCOUNT;
5090        return length;
5091}
5092SLAB_ATTR(reclaim_account);
5093
5094static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5095{
5096        return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5097}
5098SLAB_ATTR_RO(hwcache_align);
5099
5100#ifdef CONFIG_ZONE_DMA
5101static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5102{
5103        return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5104}
5105SLAB_ATTR_RO(cache_dma);
5106#endif
5107
5108static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5109{
5110        return sprintf(buf, "%u\n", s->usersize);
5111}
5112SLAB_ATTR_RO(usersize);
5113
5114static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5115{
5116        return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
5117}
5118SLAB_ATTR_RO(destroy_by_rcu);
5119
5120static ssize_t reserved_show(struct kmem_cache *s, char *buf)
5121{
5122        return sprintf(buf, "%u\n", s->reserved);
5123}
5124SLAB_ATTR_RO(reserved);
5125
5126#ifdef CONFIG_SLUB_DEBUG
5127static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5128{
5129        return show_slab_objects(s, buf, SO_ALL);
5130}
5131SLAB_ATTR_RO(slabs);
5132
5133static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5134{
5135        return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5136}
5137SLAB_ATTR_RO(total_objects);
5138
5139static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5140{
5141        return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
5142}
5143
5144static ssize_t sanity_checks_store(struct kmem_cache *s,
5145                                const char *buf, size_t length)
5146{
5147        s->flags &= ~SLAB_CONSISTENCY_CHECKS;
5148        if (buf[0] == '1') {
5149                s->flags &= ~__CMPXCHG_DOUBLE;
5150                s->flags |= SLAB_CONSISTENCY_CHECKS;
5151        }
5152        return length;
5153}
5154SLAB_ATTR(sanity_checks);
5155
5156static ssize_t trace_show(struct kmem_cache *s, char *buf)
5157{
5158        return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5159}
5160
5161static ssize_t trace_store(struct kmem_cache *s, const char *buf,
5162                                                        size_t length)
5163{
5164        /*
5165         * Tracing a merged cache is going to give confusing results
5166         * as well as cause other issues like converting a mergeable
5167         * cache into an umergeable one.
5168         */
5169        if (s->refcount > 1)
5170                return -EINVAL;
5171
5172        s->flags &= ~SLAB_TRACE;
5173        if (buf[0] == '1') {
5174                s->flags &= ~__CMPXCHG_DOUBLE;
5175                s->flags |= SLAB_TRACE;
5176        }
5177        return length;
5178}
5179SLAB_ATTR(trace);
5180
5181static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5182{
5183        return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5184}
5185
5186static ssize_t red_zone_store(struct kmem_cache *s,
5187                                const char *buf, size_t length)
5188{
5189        if (any_slab_objects(s))
5190                return -EBUSY;
5191
5192        s->flags &= ~SLAB_RED_ZONE;
5193        if (buf[0] == '1') {
5194                s->flags |= SLAB_RED_ZONE;
5195        }
5196        calculate_sizes(s, -1);
5197        return length;
5198}
5199SLAB_ATTR(red_zone);
5200
5201static ssize_t poison_show(struct kmem_cache *s, char *buf)
5202{
5203        return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
5204}
5205
5206static ssize_t poison_store(struct kmem_cache *s,
5207                                const char *buf, size_t length)
5208{
5209        if (any_slab_objects(s))
5210                return -EBUSY;
5211
5212        s->flags &= ~SLAB_POISON;
5213        if (buf[0] == '1') {
5214                s->flags |= SLAB_POISON;
5215        }
5216        calculate_sizes(s, -1);
5217        return length;
5218}
5219SLAB_ATTR(poison);
5220
5221static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5222{
5223        return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5224}
5225
5226static ssize_t store_user_store(struct kmem_cache *s,
5227                                const char *buf, size_t length)
5228{
5229        if (any_slab_objects(s))
5230                return -EBUSY;
5231
5232        s->flags &= ~SLAB_STORE_USER;
5233        if (buf[0] == '1') {
5234                s->flags &= ~__CMPXCHG_DOUBLE;
5235                s->flags |= SLAB_STORE_USER;
5236        }
5237        calculate_sizes(s, -1);
5238        return length;
5239}
5240SLAB_ATTR(store_user);
5241
5242static ssize_t validate_show(struct kmem_cache *s, char *buf)
5243{
5244        return 0;
5245}
5246
5247static ssize_t validate_store(struct kmem_cache *s,
5248                        const char *buf, size_t length)
5249{
5250        int ret = -EINVAL;
5251
5252        if (buf[0] == '1') {
5253                ret = validate_slab_cache(s);
5254                if (ret >= 0)
5255                        ret = length;
5256        }
5257        return ret;
5258}
5259SLAB_ATTR(validate);
5260
5261static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
5262{
5263        if (!(s->flags & SLAB_STORE_USER))
5264                return -ENOSYS;
5265        return list_locations(s, buf, TRACK_ALLOC);
5266}
5267SLAB_ATTR_RO(alloc_calls);
5268
5269static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
5270{
5271        if (!(s->flags & SLAB_STORE_USER))
5272                return -ENOSYS;
5273        return list_locations(s, buf, TRACK_FREE);
5274}
5275SLAB_ATTR_RO(free_calls);
5276#endif /* CONFIG_SLUB_DEBUG */
5277
5278#ifdef CONFIG_FAILSLAB
5279static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5280{
5281        return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
5282}
5283
5284static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
5285                                                        size_t length)
5286{
5287        if (s->refcount > 1)
5288                return -EINVAL;
5289
5290        s->flags &= ~SLAB_FAILSLAB;
5291        if (buf[0] == '1')
5292                s->flags |= SLAB_FAILSLAB;
5293        return length;
5294}
5295SLAB_ATTR(failslab);
5296#endif
5297
5298static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5299{
5300        return 0;
5301}
5302
5303static ssize_t shrink_store(struct kmem_cache *s,
5304                        const char *buf, size_t length)
5305{
5306        if (buf[0] == '1')
5307                kmem_cache_shrink(s);
5308        else
5309                return -EINVAL;
5310        return length;
5311}
5312SLAB_ATTR(shrink);
5313
5314#ifdef CONFIG_NUMA
5315static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
5316{
5317        return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10);
5318}
5319
5320static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
5321                                const char *buf, size_t length)
5322{
5323        unsigned int ratio;
5324        int err;
5325
5326        err = kstrtouint(buf, 10, &ratio);
5327        if (err)
5328                return err;
5329        if (ratio > 100)
5330                return -ERANGE;
5331
5332        s->remote_node_defrag_ratio = ratio * 10;
5333
5334        return length;
5335}
5336SLAB_ATTR(remote_node_defrag_ratio);
5337#endif
5338
5339#ifdef CONFIG_SLUB_STATS
5340static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5341{
5342        unsigned long sum  = 0;
5343        int cpu;
5344        int len;
5345        int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
5346
5347        if (!data)
5348                return -ENOMEM;
5349
5350        for_each_online_cpu(cpu) {
5351                unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5352
5353                data[cpu] = x;
5354                sum += x;
5355        }
5356
5357        len = sprintf(buf, "%lu", sum);
5358
5359#ifdef CONFIG_SMP
5360        for_each_online_cpu(cpu) {
5361                if (data[cpu] && len < PAGE_SIZE - 20)
5362                        len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
5363        }
5364#endif
5365        kfree(data);
5366        return len + sprintf(buf + len, "\n");
5367}
5368
5369static void clear_stat(struct kmem_cache *s, enum stat_item si)
5370{
5371        int cpu;
5372
5373        for_each_online_cpu(cpu)
5374                per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5375}
5376
5377#define STAT_ATTR(si, text)                                     \
5378static ssize_t text##_show(struct kmem_cache *s, char *buf)     \
5379{                                                               \
5380        return show_stat(s, buf, si);                           \
5381}                                                               \
5382static ssize_t text##_store(struct kmem_cache *s,               \
5383                                const char *buf, size_t length) \
5384{                                                               \
5385        if (buf[0] != '0')                                      \
5386                return -EINVAL;                                 \
5387        clear_stat(s, si);                                      \
5388        return length;                                          \
5389}                                                               \
5390SLAB_ATTR(text);                                                \
5391
5392STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5393STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5394STAT_ATTR(FREE_FASTPATH, free_fastpath);
5395STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5396STAT_ATTR(FREE_FROZEN, free_frozen);
5397STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5398STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5399STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5400STAT_ATTR(ALLOC_SLAB, alloc_slab);
5401STAT_ATTR(ALLOC_REFILL, alloc_refill);
5402STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5403STAT_ATTR(FREE_SLAB, free_slab);
5404STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5405STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5406STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5407STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5408STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5409STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5410STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5411STAT_ATTR(ORDER_FALLBACK, order_fallback);
5412STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5413STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5414STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5415STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5416STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5417STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5418#endif
5419
5420static struct attribute *slab_attrs[] = {
5421        &slab_size_attr.attr,
5422        &object_size_attr.attr,
5423        &objs_per_slab_attr.attr,
5424        &order_attr.attr,
5425        &min_partial_attr.attr,
5426        &cpu_partial_attr.attr,
5427        &objects_attr.attr,
5428        &objects_partial_attr.attr,
5429        &partial_attr.attr,
5430        &cpu_slabs_attr.attr,
5431        &ctor_attr.attr,
5432        &aliases_attr.attr,
5433        &align_attr.attr,
5434        &hwcache_align_attr.attr,
5435        &reclaim_account_attr.attr,
5436        &destroy_by_rcu_attr.attr,
5437        &shrink_attr.attr,
5438        &reserved_attr.attr,
5439        &slabs_cpu_partial_attr.attr,
5440#ifdef CONFIG_SLUB_DEBUG
5441        &total_objects_attr.attr,
5442        &slabs_attr.attr,
5443        &sanity_checks_attr.attr,
5444        &trace_attr.attr,
5445        &red_zone_attr.attr,
5446        &poison_attr.attr,
5447        &store_user_attr.attr,
5448        &validate_attr.attr,
5449        &alloc_calls_attr.attr,
5450        &free_calls_attr.attr,
5451#endif
5452#ifdef CONFIG_ZONE_DMA
5453        &cache_dma_attr.attr,
5454#endif
5455#ifdef CONFIG_NUMA
5456        &remote_node_defrag_ratio_attr.attr,
5457#endif
5458#ifdef CONFIG_SLUB_STATS
5459        &alloc_fastpath_attr.attr,
5460        &alloc_slowpath_attr.attr,
5461        &free_fastpath_attr.attr,
5462        &free_slowpath_attr.attr,
5463        &free_frozen_attr.attr,
5464        &free_add_partial_attr.attr,
5465        &free_remove_partial_attr.attr,
5466        &alloc_from_partial_attr.attr,
5467        &alloc_slab_attr.attr,
5468        &alloc_refill_attr.attr,
5469        &alloc_node_mismatch_attr.attr,
5470        &free_slab_attr.attr,
5471        &cpuslab_flush_attr.attr,
5472        &deactivate_full_attr.attr,
5473        &deactivate_empty_attr.attr,
5474        &deactivate_to_head_attr.attr,
5475        &deactivate_to_tail_attr.attr,
5476        &deactivate_remote_frees_attr.attr,
5477        &deactivate_bypass_attr.attr,
5478        &order_fallback_attr.attr,
5479        &cmpxchg_double_fail_attr.attr,
5480        &cmpxchg_double_cpu_fail_attr.attr,
5481        &cpu_partial_alloc_attr.attr,
5482        &cpu_partial_free_attr.attr,
5483        &cpu_partial_node_attr.attr,
5484        &cpu_partial_drain_attr.attr,
5485#endif
5486#ifdef CONFIG_FAILSLAB
5487        &failslab_attr.attr,
5488#endif
5489        &usersize_attr.attr,
5490
5491        NULL
5492};
5493
5494static const struct attribute_group slab_attr_group = {
5495        .attrs = slab_attrs,
5496};
5497
5498static ssize_t slab_attr_show(struct kobject *kobj,
5499                                struct attribute *attr,
5500                                char *buf)
5501{
5502        struct slab_attribute *attribute;
5503        struct kmem_cache *s;
5504        int err;
5505
5506        attribute = to_slab_attr(attr);
5507        s = to_slab(kobj);
5508
5509        if (!attribute->show)
5510                return -EIO;
5511
5512        err = attribute->show(s, buf);
5513
5514        return err;
5515}
5516
5517static ssize_t slab_attr_store(struct kobject *kobj,
5518                                struct attribute *attr,
5519                                const char *buf, size_t len)
5520{
5521        struct slab_attribute *attribute;
5522        struct kmem_cache *s;
5523        int err;
5524
5525        attribute = to_slab_attr(attr);
5526        s = to_slab(kobj);
5527
5528        if (!attribute->store)
5529                return -EIO;
5530
5531        err = attribute->store(s, buf, len);
5532#ifdef CONFIG_MEMCG
5533        if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
5534                struct kmem_cache *c;
5535
5536                mutex_lock(&slab_mutex);
5537                if (s->max_attr_size < len)
5538                        s->max_attr_size = len;
5539
5540                /*
5541                 * This is a best effort propagation, so this function's return
5542                 * value will be determined by the parent cache only. This is
5543                 * basically because not all attributes will have a well
5544                 * defined semantics for rollbacks - most of the actions will
5545                 * have permanent effects.
5546                 *
5547                 * Returning the error value of any of the children that fail
5548                 * is not 100 % defined, in the sense that users seeing the
5549                 * error code won't be able to know anything about the state of
5550                 * the cache.
5551                 *
5552                 * Only returning the error code for the parent cache at least
5553                 * has well defined semantics. The cache being written to
5554                 * directly either failed or succeeded, in which case we loop
5555                 * through the descendants with best-effort propagation.
5556                 */
5557                for_each_memcg_cache(c, s)
5558                        attribute->store(c, buf, len);
5559                mutex_unlock(&slab_mutex);
5560        }
5561#endif
5562        return err;
5563}
5564
5565static void memcg_propagate_slab_attrs(struct kmem_cache *s)
5566{
5567#ifdef CONFIG_MEMCG
5568        int i;
5569        char *buffer = NULL;
5570        struct kmem_cache *root_cache;
5571
5572        if (is_root_cache(s))
5573                return;
5574
5575        root_cache = s->memcg_params.root_cache;
5576
5577        /*
5578         * This mean this cache had no attribute written. Therefore, no point
5579         * in copying default values around
5580         */
5581        if (!root_cache->max_attr_size)
5582                return;
5583
5584        for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
5585                char mbuf[64];
5586                char *buf;
5587                struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
5588                ssize_t len;
5589
5590                if (!attr || !attr->store || !attr->show)
5591                        continue;
5592
5593                /*
5594                 * It is really bad that we have to allocate here, so we will
5595                 * do it only as a fallback. If we actually allocate, though,
5596                 * we can just use the allocated buffer until the end.
5597                 *
5598                 * Most of the slub attributes will tend to be very small in
5599                 * size, but sysfs allows buffers up to a page, so they can
5600                 * theoretically happen.
5601                 */
5602                if (buffer)
5603                        buf = buffer;
5604                else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
5605                        buf = mbuf;
5606                else {
5607                        buffer = (char *) get_zeroed_page(GFP_KERNEL);
5608                        if (WARN_ON(!buffer))
5609                                continue;
5610                        buf = buffer;
5611                }
5612
5613                len = attr->show(root_cache, buf);
5614                if (len > 0)
5615                        attr->store(s, buf, len);
5616        }
5617
5618        if (buffer)
5619                free_page((unsigned long)buffer);
5620#endif
5621}
5622
5623static void kmem_cache_release(struct kobject *k)
5624{
5625        slab_kmem_cache_release(to_slab(k));
5626}
5627
5628static const struct sysfs_ops slab_sysfs_ops = {
5629        .show = slab_attr_show,
5630        .store = slab_attr_store,
5631};
5632
5633static struct kobj_type slab_ktype = {
5634        .sysfs_ops = &slab_sysfs_ops,
5635        .release = kmem_cache_release,
5636};
5637
5638static int uevent_filter(struct kset *kset, struct kobject *kobj)
5639{
5640        struct kobj_type *ktype = get_ktype(kobj);
5641
5642        if (ktype == &slab_ktype)
5643                return 1;
5644        return 0;
5645}
5646
5647static const struct kset_uevent_ops slab_uevent_ops = {
5648        .filter = uevent_filter,
5649};
5650
5651static struct kset *slab_kset;
5652
5653static inline struct kset *cache_kset(struct kmem_cache *s)
5654{
5655#ifdef CONFIG_MEMCG
5656        if (!is_root_cache(s))
5657                return s->memcg_params.root_cache->memcg_kset;
5658#endif
5659        return slab_kset;
5660}
5661
5662#define ID_STR_LENGTH 64
5663
5664/* Create a unique string id for a slab cache:
5665 *
5666 * Format       :[flags-]size
5667 */
5668static char *create_unique_id(struct kmem_cache *s)
5669{
5670        char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5671        char *p = name;
5672
5673        BUG_ON(!name);
5674
5675        *p++ = ':';
5676        /*
5677         * First flags affecting slabcache operations. We will only
5678         * get here for aliasable slabs so we do not need to support
5679         * too many flags. The flags here must cover all flags that
5680         * are matched during merging to guarantee that the id is
5681         * unique.
5682         */
5683        if (s->flags & SLAB_CACHE_DMA)
5684                *p++ = 'd';
5685        if (s->flags & SLAB_RECLAIM_ACCOUNT)
5686                *p++ = 'a';
5687        if (s->flags & SLAB_CONSISTENCY_CHECKS)
5688                *p++ = 'F';
5689        if (s->flags & SLAB_ACCOUNT)
5690                *p++ = 'A';
5691        if (p != name + 1)
5692                *p++ = '-';
5693        p += sprintf(p, "%07u", s->size);
5694
5695        BUG_ON(p > name + ID_STR_LENGTH - 1);
5696        return name;
5697}
5698
5699static void sysfs_slab_remove_workfn(struct work_struct *work)
5700{
5701        struct kmem_cache *s =
5702                container_of(work, struct kmem_cache, kobj_remove_work);
5703
5704        if (!s->kobj.state_in_sysfs)
5705                /*
5706                 * For a memcg cache, this may be called during
5707                 * deactivation and again on shutdown.  Remove only once.
5708                 * A cache is never shut down before deactivation is
5709                 * complete, so no need to worry about synchronization.
5710                 */
5711                goto out;
5712
5713#ifdef CONFIG_MEMCG
5714        kset_unregister(s->memcg_kset);
5715#endif
5716        kobject_uevent(&s->kobj, KOBJ_REMOVE);
5717        kobject_del(&s->kobj);
5718out:
5719        kobject_put(&s->kobj);
5720}
5721
5722static int sysfs_slab_add(struct kmem_cache *s)
5723{
5724        int err;
5725        const char *name;
5726        struct kset *kset = cache_kset(s);
5727        int unmergeable = slab_unmergeable(s);
5728
5729        INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
5730
5731        if (!kset) {
5732                kobject_init(&s->kobj, &slab_ktype);
5733                return 0;
5734        }
5735
5736        if (!unmergeable && disable_higher_order_debug &&
5737                        (slub_debug & DEBUG_METADATA_FLAGS))
5738                unmergeable = 1;
5739
5740        if (unmergeable) {
5741                /*
5742                 * Slabcache can never be merged so we can use the name proper.
5743                 * This is typically the case for debug situations. In that
5744                 * case we can catch duplicate names easily.
5745                 */
5746                sysfs_remove_link(&slab_kset->kobj, s->name);
5747                name = s->name;
5748        } else {
5749                /*
5750                 * Create a unique name for the slab as a target
5751                 * for the symlinks.
5752                 */
5753                name = create_unique_id(s);
5754        }
5755
5756        s->kobj.kset = kset;
5757        err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
5758        if (err)
5759                goto out;
5760
5761        err = sysfs_create_group(&s->kobj, &slab_attr_group);
5762        if (err)
5763                goto out_del_kobj;
5764
5765#ifdef CONFIG_MEMCG
5766        if (is_root_cache(s) && memcg_sysfs_enabled) {
5767                s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
5768                if (!s->memcg_kset) {
5769                        err = -ENOMEM;
5770                        goto out_del_kobj;
5771                }
5772        }
5773#endif
5774
5775        kobject_uevent(&s->kobj, KOBJ_ADD);
5776        if (!unmergeable) {
5777                /* Setup first alias */
5778                sysfs_slab_alias(s, s->name);
5779        }
5780out:
5781        if (!unmergeable)
5782                kfree(name);
5783        return err;
5784out_del_kobj:
5785        kobject_del(&s->kobj);
5786        goto out;
5787}
5788
5789static void sysfs_slab_remove(struct kmem_cache *s)
5790{
5791        if (slab_state < FULL)
5792                /*
5793                 * Sysfs has not been setup yet so no need to remove the
5794                 * cache from sysfs.
5795                 */
5796                return;
5797
5798        kobject_get(&s->kobj);
5799        schedule_work(&s->kobj_remove_work);
5800}
5801
5802void sysfs_slab_release(struct kmem_cache *s)
5803{
5804        if (slab_state >= FULL)
5805                kobject_put(&s->kobj);
5806}
5807
5808/*
5809 * Need to buffer aliases during bootup until sysfs becomes
5810 * available lest we lose that information.
5811 */
5812struct saved_alias {
5813        struct kmem_cache *s;
5814        const char *name;
5815        struct saved_alias *next;
5816};
5817
5818static struct saved_alias *alias_list;
5819
5820static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5821{
5822        struct saved_alias *al;
5823
5824        if (slab_state == FULL) {
5825                /*
5826                 * If we have a leftover link then remove it.
5827                 */
5828                sysfs_remove_link(&slab_kset->kobj, name);
5829                return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5830        }
5831
5832        al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5833        if (!al)
5834                return -ENOMEM;
5835
5836        al->s = s;
5837        al->name = name;
5838        al->next = alias_list;
5839        alias_list = al;
5840        return 0;
5841}
5842
5843static int __init slab_sysfs_init(void)
5844{
5845        struct kmem_cache *s;
5846        int err;
5847
5848        mutex_lock(&slab_mutex);
5849
5850        slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
5851        if (!slab_kset) {
5852                mutex_unlock(&slab_mutex);
5853                pr_err("Cannot register slab subsystem.\n");
5854                return -ENOSYS;
5855        }
5856
5857        slab_state = FULL;
5858
5859        list_for_each_entry(s, &slab_caches, list) {
5860                err = sysfs_slab_add(s);
5861                if (err)
5862                        pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
5863                               s->name);
5864        }
5865
5866        while (alias_list) {
5867                struct saved_alias *al = alias_list;
5868
5869                alias_list = alias_list->next;
5870                err = sysfs_slab_alias(al->s, al->name);
5871                if (err)
5872                        pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
5873                               al->name);
5874                kfree(al);
5875        }
5876
5877        mutex_unlock(&slab_mutex);
5878        resiliency_test();
5879        return 0;
5880}
5881
5882__initcall(slab_sysfs_init);
5883#endif /* CONFIG_SYSFS */
5884
5885/*
5886 * The /proc/slabinfo ABI
5887 */
5888#ifdef CONFIG_SLUB_DEBUG
5889void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
5890{
5891        unsigned long nr_slabs = 0;
5892        unsigned long nr_objs = 0;
5893        unsigned long nr_free = 0;
5894        int node;
5895        struct kmem_cache_node *n;
5896
5897        for_each_kmem_cache_node(s, node, n) {
5898                nr_slabs += node_nr_slabs(n);
5899                nr_objs += node_nr_objs(n);
5900                nr_free += count_partial(n, count_free);
5901        }
5902
5903        sinfo->active_objs = nr_objs - nr_free;
5904        sinfo->num_objs = nr_objs;
5905        sinfo->active_slabs = nr_slabs;
5906        sinfo->num_slabs = nr_slabs;
5907        sinfo->objects_per_slab = oo_objects(s->oo);
5908        sinfo->cache_order = oo_order(s->oo);
5909}
5910
5911void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
5912{
5913}
5914
5915ssize_t slabinfo_write(struct file *file, const char __user *buffer,
5916                       size_t count, loff_t *ppos)
5917{
5918        return -EIO;
5919}
5920#endif /* CONFIG_SLUB_DEBUG */
5921