linux/mm/slab_common.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Slab allocator functions that are independent of the allocator strategy
   4 *
   5 * (C) 2012 Christoph Lameter <cl@linux.com>
   6 */
   7#include <linux/slab.h>
   8
   9#include <linux/mm.h>
  10#include <linux/poison.h>
  11#include <linux/interrupt.h>
  12#include <linux/memory.h>
  13#include <linux/cache.h>
  14#include <linux/compiler.h>
  15#include <linux/module.h>
  16#include <linux/cpu.h>
  17#include <linux/uaccess.h>
  18#include <linux/seq_file.h>
  19#include <linux/proc_fs.h>
  20#include <linux/debugfs.h>
  21#include <linux/kasan.h>
  22#include <asm/cacheflush.h>
  23#include <asm/tlbflush.h>
  24#include <asm/page.h>
  25#include <linux/memcontrol.h>
  26
  27#define CREATE_TRACE_POINTS
  28#include <trace/events/kmem.h>
  29
  30#include "internal.h"
  31
  32#include "slab.h"
  33
  34enum slab_state slab_state;
  35LIST_HEAD(slab_caches);
  36DEFINE_MUTEX(slab_mutex);
  37struct kmem_cache *kmem_cache;
  38
  39#ifdef CONFIG_HARDENED_USERCOPY
  40bool usercopy_fallback __ro_after_init =
  41                IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
  42module_param(usercopy_fallback, bool, 0400);
  43MODULE_PARM_DESC(usercopy_fallback,
  44                "WARN instead of reject usercopy whitelist violations");
  45#endif
  46
  47static LIST_HEAD(slab_caches_to_rcu_destroy);
  48static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
  49static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
  50                    slab_caches_to_rcu_destroy_workfn);
  51
  52/*
  53 * Set of flags that will prevent slab merging
  54 */
  55#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  56                SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
  57                SLAB_FAILSLAB | kasan_never_merge())
  58
  59#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
  60                         SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
  61
  62/*
  63 * Merge control. If this is set then no merging of slab caches will occur.
  64 */
  65static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
  66
  67static int __init setup_slab_nomerge(char *str)
  68{
  69        slab_nomerge = true;
  70        return 1;
  71}
  72
  73#ifdef CONFIG_SLUB
  74__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
  75#endif
  76
  77__setup("slab_nomerge", setup_slab_nomerge);
  78
  79/*
  80 * Determine the size of a slab object
  81 */
  82unsigned int kmem_cache_size(struct kmem_cache *s)
  83{
  84        return s->object_size;
  85}
  86EXPORT_SYMBOL(kmem_cache_size);
  87
  88#ifdef CONFIG_DEBUG_VM
  89static int kmem_cache_sanity_check(const char *name, unsigned int size)
  90{
  91        if (!name || in_interrupt() || size < sizeof(void *) ||
  92                size > KMALLOC_MAX_SIZE) {
  93                pr_err("kmem_cache_create(%s) integrity check failed\n", name);
  94                return -EINVAL;
  95        }
  96
  97        WARN_ON(strchr(name, ' '));     /* It confuses parsers */
  98        return 0;
  99}
 100#else
 101static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
 102{
 103        return 0;
 104}
 105#endif
 106
 107void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 108{
 109        size_t i;
 110
 111        for (i = 0; i < nr; i++) {
 112                if (s)
 113                        kmem_cache_free(s, p[i]);
 114                else
 115                        kfree(p[i]);
 116        }
 117}
 118
 119int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 120                                                                void **p)
 121{
 122        size_t i;
 123
 124        for (i = 0; i < nr; i++) {
 125                void *x = p[i] = kmem_cache_alloc(s, flags);
 126                if (!x) {
 127                        __kmem_cache_free_bulk(s, i, p);
 128                        return 0;
 129                }
 130        }
 131        return i;
 132}
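
/*
 * Example (illustrative sketch, added for illustration; "my_cachep" and the
 * helper below are hypothetical): the loops above are the generic fallback
 * used by some of the slab allocators behind the public
 * kmem_cache_alloc_bulk()/kmem_cache_free_bulk() API, which callers might
 * use like this to batch allocations.
 */
static int example_bulk_alloc(struct kmem_cache *my_cachep)
{
        void *objs[16];

        /* Returns the number of objects allocated: all 16, or 0 on failure. */
        if (!kmem_cache_alloc_bulk(my_cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs))
                return -ENOMEM;

        /* ... use objs[0..15] ... */

        kmem_cache_free_bulk(my_cachep, ARRAY_SIZE(objs), objs);
        return 0;
}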
 133
 134/*
 135 * Figure out what the alignment of the objects will be given a set of
 136 * flags, a user specified alignment and the size of the objects.
 137 */
 138static unsigned int calculate_alignment(slab_flags_t flags,
 139                unsigned int align, unsigned int size)
 140{
 141        /*
 142         * If the user wants hardware cache aligned objects then follow that
 143         * suggestion if the object is sufficiently large.
 144         *
 145         * The hardware cache alignment cannot override the specified
 146         * alignment though. If the specified alignment is greater, use it.
 147         */
 148        if (flags & SLAB_HWCACHE_ALIGN) {
 149                unsigned int ralign;
 150
 151                ralign = cache_line_size();
 152                while (size <= ralign / 2)
 153                        ralign /= 2;
 154                align = max(align, ralign);
 155        }
 156
 157        if (align < ARCH_SLAB_MINALIGN)
 158                align = ARCH_SLAB_MINALIGN;
 159
 160        return ALIGN(align, sizeof(void *));
 161}
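
/*
 * Worked example (added for illustration): with SLAB_HWCACHE_ALIGN, a
 * requested align of 8 and a 24-byte object on a machine with 64-byte cache
 * lines, ralign starts at 64 and is halved while size <= ralign / 2:
 * 24 <= 32 gives ralign = 32, then 24 <= 16 fails and the loop stops.
 * The result is max(8, 32) = 32, so several small objects still share one
 * cache line instead of each being padded out to a full line.
 */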
 162
 163/*
 164 * Determine whether a slab cache is unmergeable
 165 */
 166int slab_unmergeable(struct kmem_cache *s)
 167{
 168        if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
 169                return 1;
 170
 171        if (s->ctor)
 172                return 1;
 173
 174        if (s->usersize)
 175                return 1;
 176
 177        /*
 178         * We may have set a slab to be unmergeable during bootstrap.
 179         */
 180        if (s->refcount < 0)
 181                return 1;
 182
 183        return 0;
 184}
 185
 186struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 187                slab_flags_t flags, const char *name, void (*ctor)(void *))
 188{
 189        struct kmem_cache *s;
 190
 191        if (slab_nomerge)
 192                return NULL;
 193
 194        if (ctor)
 195                return NULL;
 196
 197        size = ALIGN(size, sizeof(void *));
 198        align = calculate_alignment(flags, align, size);
 199        size = ALIGN(size, align);
 200        flags = kmem_cache_flags(size, flags, name, NULL);
 201
 202        if (flags & SLAB_NEVER_MERGE)
 203                return NULL;
 204
 205        list_for_each_entry_reverse(s, &slab_caches, list) {
 206                if (slab_unmergeable(s))
 207                        continue;
 208
 209                if (size > s->size)
 210                        continue;
 211
 212                if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
 213                        continue;
 214                /*
 215                 * Check if alignment is compatible.
 216                 * Courtesy of Adrian Drzewiecki
 217                 */
 218                if ((s->size & ~(align - 1)) != s->size)
 219                        continue;
 220
 221                if (s->size - size >= sizeof(void *))
 222                        continue;
 223
 224                if (IS_ENABLED(CONFIG_SLAB) && align &&
 225                        (align > s->align || s->align % align))
 226                        continue;
 227
 228                return s;
 229        }
 230        return NULL;
 231}
 232
 233static struct kmem_cache *create_cache(const char *name,
 234                unsigned int object_size, unsigned int align,
 235                slab_flags_t flags, unsigned int useroffset,
 236                unsigned int usersize, void (*ctor)(void *),
 237                struct kmem_cache *root_cache)
 238{
 239        struct kmem_cache *s;
 240        int err;
 241
 242        if (WARN_ON(useroffset + usersize > object_size))
 243                useroffset = usersize = 0;
 244
 245        err = -ENOMEM;
 246        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 247        if (!s)
 248                goto out;
 249
 250        s->name = name;
 251        s->size = s->object_size = object_size;
 252        s->align = align;
 253        s->ctor = ctor;
 254        s->useroffset = useroffset;
 255        s->usersize = usersize;
 256
 257        err = __kmem_cache_create(s, flags);
 258        if (err)
 259                goto out_free_cache;
 260
 261        s->refcount = 1;
 262        list_add(&s->list, &slab_caches);
 263out:
 264        if (err)
 265                return ERR_PTR(err);
 266        return s;
 267
 268out_free_cache:
 269        kmem_cache_free(kmem_cache, s);
 270        goto out;
 271}
 272
 273/**
 274 * kmem_cache_create_usercopy - Create a cache with a region suitable
 275 * for copying to userspace
 276 * @name: A string which is used in /proc/slabinfo to identify this cache.
 277 * @size: The size of objects to be created in this cache.
 278 * @align: The required alignment for the objects.
 279 * @flags: SLAB flags
 280 * @useroffset: Usercopy region offset
 281 * @usersize: Usercopy region size
 282 * @ctor: A constructor for the objects.
 283 *
 284 * Cannot be called within an interrupt, but can be interrupted.
 285 * The @ctor is run when new pages are allocated by the cache.
 286 *
 287 * The flags are
 288 *
 289 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 290 * to catch references to uninitialised memory.
 291 *
 292 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 293 * for buffer overruns.
 294 *
 295 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 296 * cacheline.  This can be beneficial if you're counting cycles as closely
 297 * as davem.
 298 *
 299 * Return: a pointer to the cache on success, NULL on failure.
 300 */
 301struct kmem_cache *
 302kmem_cache_create_usercopy(const char *name,
 303                  unsigned int size, unsigned int align,
 304                  slab_flags_t flags,
 305                  unsigned int useroffset, unsigned int usersize,
 306                  void (*ctor)(void *))
 307{
 308        struct kmem_cache *s = NULL;
 309        const char *cache_name;
 310        int err;
 311
 312        get_online_cpus();
 313        get_online_mems();
 314
 315        mutex_lock(&slab_mutex);
 316
 317        err = kmem_cache_sanity_check(name, size);
 318        if (err)
 319                goto out_unlock;
 321
 322        /* Refuse requests with allocator specific flags */
 323        if (flags & ~SLAB_FLAGS_PERMITTED) {
 324                err = -EINVAL;
 325                goto out_unlock;
 326        }
 327
 328        /*
 329         * Some allocators will constrain the set of valid flags to a subset
 330         * of all flags. We expect them to define CACHE_CREATE_MASK in this
 331         * case, and we'll just provide them with a sanitized version of the
 332         * passed flags.
 333         */
 334        flags &= CACHE_CREATE_MASK;
 335
 336        /* Fail closed on bad usersize or useroffset values. */
 337        if (WARN_ON(!usersize && useroffset) ||
 338            WARN_ON(size < usersize || size - usersize < useroffset))
 339                usersize = useroffset = 0;
 340
 341        if (!usersize)
 342                s = __kmem_cache_alias(name, size, align, flags, ctor);
 343        if (s)
 344                goto out_unlock;
 345
 346        cache_name = kstrdup_const(name, GFP_KERNEL);
 347        if (!cache_name) {
 348                err = -ENOMEM;
 349                goto out_unlock;
 350        }
 351
 352        s = create_cache(cache_name, size,
 353                         calculate_alignment(flags, align, size),
 354                         flags, useroffset, usersize, ctor, NULL);
 355        if (IS_ERR(s)) {
 356                err = PTR_ERR(s);
 357                kfree_const(cache_name);
 358        }
 359
 360out_unlock:
 361        mutex_unlock(&slab_mutex);
 362
 363        put_online_mems();
 364        put_online_cpus();
 365
 366        if (err) {
 367                if (flags & SLAB_PANIC)
 368                        panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
 369                                name, err);
 370                else {
 371                        pr_warn("kmem_cache_create(%s) failed with error %d\n",
 372                                name, err);
 373                        dump_stack();
 374                }
 375                return NULL;
 376        }
 377        return s;
 378}
 379EXPORT_SYMBOL(kmem_cache_create_usercopy);
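
/*
 * Example (illustrative sketch, added for illustration; the structure and
 * names below are hypothetical): a cache whose objects carry a region that
 * is copied to/from user space. With CONFIG_HARDENED_USERCOPY, only the
 * whitelisted "data" field may be the source or destination of a usercopy.
 */
struct example_obj {
        spinlock_t lock;
        u32 flags;
        char data[128];         /* the only part exposed to user space */
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
        example_cachep = kmem_cache_create_usercopy("example_obj",
                                sizeof(struct example_obj), 0,
                                SLAB_HWCACHE_ALIGN,
                                offsetof(struct example_obj, data),
                                sizeof_field(struct example_obj, data),
                                NULL);
        return example_cachep ? 0 : -ENOMEM;
}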
 380
 381/**
 382 * kmem_cache_create - Create a cache.
 383 * @name: A string which is used in /proc/slabinfo to identify this cache.
 384 * @size: The size of objects to be created in this cache.
 385 * @align: The required alignment for the objects.
 386 * @flags: SLAB flags
 387 * @ctor: A constructor for the objects.
 388 *
 389 * Cannot be called within an interrupt, but can be interrupted.
 390 * The @ctor is run when new pages are allocated by the cache.
 391 *
 392 * The flags are
 393 *
 394 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 395 * to catch references to uninitialised memory.
 396 *
 397 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 398 * for buffer overruns.
 399 *
 400 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 401 * cacheline.  This can be beneficial if you're counting cycles as closely
 402 * as davem.
 403 *
 404 * Return: a pointer to the cache on success, NULL on failure.
 405 */
 406struct kmem_cache *
 407kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 408                slab_flags_t flags, void (*ctor)(void *))
 409{
 410        return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
 411                                          ctor);
 412}
 413EXPORT_SYMBOL(kmem_cache_create);
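
/*
 * Example (illustrative sketch with hypothetical names, added for
 * illustration): the usual life cycle of a cache made with
 * kmem_cache_create(). Objects come from kmem_cache_alloc() and return via
 * kmem_cache_free(); the cache itself is torn down with
 * kmem_cache_destroy() once all objects have been freed.
 */
static struct kmem_cache *widget_cachep;

static int example_widget_lifecycle(void)
{
        void *w;

        widget_cachep = kmem_cache_create("widget", 96, 0,
                                          SLAB_HWCACHE_ALIGN, NULL);
        if (!widget_cachep)
                return -ENOMEM;

        w = kmem_cache_alloc(widget_cachep, GFP_KERNEL);
        if (w)
                kmem_cache_free(widget_cachep, w);

        kmem_cache_destroy(widget_cachep);
        return 0;
}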
 414
 415static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 416{
 417        LIST_HEAD(to_destroy);
 418        struct kmem_cache *s, *s2;
 419
 420        /*
 421         * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
 422         * @slab_caches_to_rcu_destroy list.  The slab pages are freed
 423         * through RCU and the associated kmem_caches are dereferenced
 424         * while freeing the pages, so the kmem_caches should be freed only
 425         * after the pending RCU operations are finished.  As rcu_barrier()
 426         * is a pretty slow operation, we batch all pending destructions
 427         * asynchronously.
 428         */
 429        mutex_lock(&slab_mutex);
 430        list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
 431        mutex_unlock(&slab_mutex);
 432
 433        if (list_empty(&to_destroy))
 434                return;
 435
 436        rcu_barrier();
 437
 438        list_for_each_entry_safe(s, s2, &to_destroy, list) {
 439#ifdef SLAB_SUPPORTS_SYSFS
 440                sysfs_slab_release(s);
 441#else
 442                slab_kmem_cache_release(s);
 443#endif
 444        }
 445}
 446
 447static int shutdown_cache(struct kmem_cache *s)
 448{
 449        /* free asan quarantined objects */
 450        kasan_cache_shutdown(s);
 451
 452        if (__kmem_cache_shutdown(s) != 0)
 453                return -EBUSY;
 454
 455        list_del(&s->list);
 456
 457        if (s->flags & SLAB_TYPESAFE_BY_RCU) {
 458#ifdef SLAB_SUPPORTS_SYSFS
 459                sysfs_slab_unlink(s);
 460#endif
 461                list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
 462                schedule_work(&slab_caches_to_rcu_destroy_work);
 463        } else {
 464#ifdef SLAB_SUPPORTS_SYSFS
 465                sysfs_slab_unlink(s);
 466                sysfs_slab_release(s);
 467#else
 468                slab_kmem_cache_release(s);
 469#endif
 470        }
 471
 472        return 0;
 473}
 474
 475void slab_kmem_cache_release(struct kmem_cache *s)
 476{
 477        __kmem_cache_release(s);
 478        kfree_const(s->name);
 479        kmem_cache_free(kmem_cache, s);
 480}
 481
 482void kmem_cache_destroy(struct kmem_cache *s)
 483{
 484        int err;
 485
 486        if (unlikely(!s))
 487                return;
 488
 489        get_online_cpus();
 490        get_online_mems();
 491
 492        mutex_lock(&slab_mutex);
 493
 494        s->refcount--;
 495        if (s->refcount)
 496                goto out_unlock;
 497
 498        err = shutdown_cache(s);
 499        if (err) {
 500                pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
 501                       s->name);
 502                dump_stack();
 503        }
 504out_unlock:
 505        mutex_unlock(&slab_mutex);
 506
 507        put_online_mems();
 508        put_online_cpus();
 509}
 510EXPORT_SYMBOL(kmem_cache_destroy);
 511
 512/**
 513 * kmem_cache_shrink - Shrink a cache.
 514 * @cachep: The cache to shrink.
 515 *
 516 * Releases as many slabs as possible for a cache.
 517 * To help debugging, a zero exit status indicates all slabs were released.
 518 *
 519 * Return: %0 if all slabs were released, non-zero otherwise
 520 */
 521int kmem_cache_shrink(struct kmem_cache *cachep)
 522{
 523        int ret;
 524
 525        get_online_cpus();
 526        get_online_mems();
 527        kasan_cache_shrink(cachep);
 528        ret = __kmem_cache_shrink(cachep);
 529        put_online_mems();
 530        put_online_cpus();
 531        return ret;
 532}
 533EXPORT_SYMBOL(kmem_cache_shrink);
 534
 535bool slab_is_available(void)
 536{
 537        return slab_state >= UP;
 538}
 539
 540#ifndef CONFIG_SLOB
 541/* Create a cache during boot when no slab services are available yet */
 542void __init create_boot_cache(struct kmem_cache *s, const char *name,
 543                unsigned int size, slab_flags_t flags,
 544                unsigned int useroffset, unsigned int usersize)
 545{
 546        int err;
 547        unsigned int align = ARCH_KMALLOC_MINALIGN;
 548
 549        s->name = name;
 550        s->size = s->object_size = size;
 551
 552        /*
 553         * For power of two sizes, guarantee natural alignment for kmalloc
 554         * caches, regardless of SL*B debugging options.
 555         */
 556        if (is_power_of_2(size))
 557                align = max(align, size);
 558        s->align = calculate_alignment(flags, align, size);
 559
 560        s->useroffset = useroffset;
 561        s->usersize = usersize;
 562
 563        err = __kmem_cache_create(s, flags);
 564
 565        if (err)
 566                panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
 567                                        name, size, err);
 568
 569        s->refcount = -1;       /* Exempt from merging for now */
 570}
 571
 572struct kmem_cache *__init create_kmalloc_cache(const char *name,
 573                unsigned int size, slab_flags_t flags,
 574                unsigned int useroffset, unsigned int usersize)
 575{
 576        struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 577
 578        if (!s)
 579                panic("Out of memory when creating slab %s\n", name);
 580
 581        create_boot_cache(s, name, size, flags, useroffset, usersize);
 582        list_add(&s->list, &slab_caches);
 583        s->refcount = 1;
 584        return s;
 585}
 586
 587struct kmem_cache *
 588kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
 589{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
 590EXPORT_SYMBOL(kmalloc_caches);
 591
 592/*
 593 * Conversion table for small slab sizes / 8 to the index in the
 594 * kmalloc array. This is necessary for slabs < 192 since we have
 595 * non-power-of-two cache sizes there. The size of larger slabs can be
 596 * determined using fls().
 597 */
 598static u8 size_index[24] __ro_after_init = {
 599        3,      /* 8 */
 600        4,      /* 16 */
 601        5,      /* 24 */
 602        5,      /* 32 */
 603        6,      /* 40 */
 604        6,      /* 48 */
 605        6,      /* 56 */
 606        6,      /* 64 */
 607        1,      /* 72 */
 608        1,      /* 80 */
 609        1,      /* 88 */
 610        1,      /* 96 */
 611        7,      /* 104 */
 612        7,      /* 112 */
 613        7,      /* 120 */
 614        7,      /* 128 */
 615        2,      /* 136 */
 616        2,      /* 144 */
 617        2,      /* 152 */
 618        2,      /* 160 */
 619        2,      /* 168 */
 620        2,      /* 176 */
 621        2,      /* 184 */
 622        2       /* 192 */
 623};
 624
 625static inline unsigned int size_index_elem(unsigned int bytes)
 626{
 627        return (bytes - 1) / 8;
 628}
 629
 630/*
 631 * Find the kmem_cache structure that serves a given size of
 632 * allocation
 633 */
 634struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 635{
 636        unsigned int index;
 637
 638        if (size <= 192) {
 639                if (!size)
 640                        return ZERO_SIZE_PTR;
 641
 642                index = size_index[size_index_elem(size)];
 643        } else {
 644                if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
 645                        return NULL;
 646                index = fls(size - 1);
 647        }
 648
 649        return kmalloc_caches[kmalloc_type(flags)][index];
 650}
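
/*
 * Worked examples (added for illustration): kmalloc(100, ...) takes the
 * small path, size_index_elem(100) = (100 - 1) / 8 = 12 and
 * size_index[12] = 7, i.e. the kmalloc-128 cache.  kmalloc(300, ...) takes
 * the large path, fls(299) = 9, i.e. the kmalloc-512 cache (2^9 = 512).
 */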
 651
 652#ifdef CONFIG_ZONE_DMA
 653#define INIT_KMALLOC_INFO(__size, __short_size)                 \
 654{                                                               \
 655        .name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,      \
 656        .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,  \
 657        .name[KMALLOC_DMA]     = "dma-kmalloc-" #__short_size,  \
 658        .size = __size,                                         \
 659}
 660#else
 661#define INIT_KMALLOC_INFO(__size, __short_size)                 \
 662{                                                               \
 663        .name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,      \
 664        .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,  \
 665        .size = __size,                                         \
 666}
 667#endif
 668
 669/*
 670 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 671 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
 672 * kmalloc-67108864.
 673 */
 674const struct kmalloc_info_struct kmalloc_info[] __initconst = {
 675        INIT_KMALLOC_INFO(0, 0),
 676        INIT_KMALLOC_INFO(96, 96),
 677        INIT_KMALLOC_INFO(192, 192),
 678        INIT_KMALLOC_INFO(8, 8),
 679        INIT_KMALLOC_INFO(16, 16),
 680        INIT_KMALLOC_INFO(32, 32),
 681        INIT_KMALLOC_INFO(64, 64),
 682        INIT_KMALLOC_INFO(128, 128),
 683        INIT_KMALLOC_INFO(256, 256),
 684        INIT_KMALLOC_INFO(512, 512),
 685        INIT_KMALLOC_INFO(1024, 1k),
 686        INIT_KMALLOC_INFO(2048, 2k),
 687        INIT_KMALLOC_INFO(4096, 4k),
 688        INIT_KMALLOC_INFO(8192, 8k),
 689        INIT_KMALLOC_INFO(16384, 16k),
 690        INIT_KMALLOC_INFO(32768, 32k),
 691        INIT_KMALLOC_INFO(65536, 64k),
 692        INIT_KMALLOC_INFO(131072, 128k),
 693        INIT_KMALLOC_INFO(262144, 256k),
 694        INIT_KMALLOC_INFO(524288, 512k),
 695        INIT_KMALLOC_INFO(1048576, 1M),
 696        INIT_KMALLOC_INFO(2097152, 2M),
 697        INIT_KMALLOC_INFO(4194304, 4M),
 698        INIT_KMALLOC_INFO(8388608, 8M),
 699        INIT_KMALLOC_INFO(16777216, 16M),
 700        INIT_KMALLOC_INFO(33554432, 32M),
 701        INIT_KMALLOC_INFO(67108864, 64M)
 702};
 703
 704/*
 705 * Patch up the size_index table if we have strange large alignment
 706 * requirements for the kmalloc array. This is only the case for
 707 * MIPS it seems. The standard arches will not generate any code here.
 708 *
 709 * Largest permitted alignment is 256 bytes due to the way we
 710 * handle the index determination for the smaller caches.
 711 *
 712 * Make sure that nothing crazy happens if someone starts tinkering
 713 * around with ARCH_KMALLOC_MINALIGN.
 714 */
 715void __init setup_kmalloc_cache_index_table(void)
 716{
 717        unsigned int i;
 718
 719        BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
 720                (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
 721
 722        for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
 723                unsigned int elem = size_index_elem(i);
 724
 725                if (elem >= ARRAY_SIZE(size_index))
 726                        break;
 727                size_index[elem] = KMALLOC_SHIFT_LOW;
 728        }
 729
 730        if (KMALLOC_MIN_SIZE >= 64) {
 731                /*
 732                 * The 96 byte sized cache is not used if the alignment
 733                 * is 64 bytes.
 734                 */
 735                for (i = 64 + 8; i <= 96; i += 8)
 736                        size_index[size_index_elem(i)] = 7;
 737
 738        }
 739
 740        if (KMALLOC_MIN_SIZE >= 128) {
 741                /*
 742                 * The 192 byte sized cache is not used if the alignment
 743                 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
 744                 * instead.
 745                 */
 746                for (i = 128 + 8; i <= 192; i += 8)
 747                        size_index[size_index_elem(i)] = 8;
 748        }
 749}
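
/*
 * Worked example (added for illustration): with KMALLOC_MIN_SIZE == 64
 * (e.g. an architecture whose DMA requirements force a 64-byte minimum
 * alignment), the first loop points the entries for sizes 8..56 at
 * KMALLOC_SHIFT_LOW (kmalloc-64), and the second loop redirects 72..96
 * from the unused kmalloc-96 cache to index 7 (kmalloc-128).
 */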
 750
 751static void __init
 752new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
 753{
 754        if (type == KMALLOC_RECLAIM)
 755                flags |= SLAB_RECLAIM_ACCOUNT;
 756
 757        kmalloc_caches[type][idx] = create_kmalloc_cache(
 758                                        kmalloc_info[idx].name[type],
 759                                        kmalloc_info[idx].size, flags, 0,
 760                                        kmalloc_info[idx].size);
 761}
 762
 763/*
 764 * Create the kmalloc array. Some of the regular kmalloc arrays
 765 * may already have been created because they were needed to
 766 * enable allocations for slab creation.
 767 */
 768void __init create_kmalloc_caches(slab_flags_t flags)
 769{
 770        int i;
 771        enum kmalloc_cache_type type;
 772
 773        for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
 774                for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 775                        if (!kmalloc_caches[type][i])
 776                                new_kmalloc_cache(i, type, flags);
 777
 778                        /*
 779                         * Caches that are not a power-of-two size.
 780                         * These have to be created immediately after the
 781                         * earlier power-of-two caches.
 782                         */
 783                        if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
 784                                        !kmalloc_caches[type][1])
 785                                new_kmalloc_cache(1, type, flags);
 786                        if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
 787                                        !kmalloc_caches[type][2])
 788                                new_kmalloc_cache(2, type, flags);
 789                }
 790        }
 791
 792        /* Kmalloc array is now usable */
 793        slab_state = UP;
 794
 795#ifdef CONFIG_ZONE_DMA
 796        for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
 797                struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
 798
 799                if (s) {
 800                        kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
 801                                kmalloc_info[i].name[KMALLOC_DMA],
 802                                kmalloc_info[i].size,
 803                                SLAB_CACHE_DMA | flags, 0,
 804                                kmalloc_info[i].size);
 805                }
 806        }
 807#endif
 808}
 809#endif /* !CONFIG_SLOB */
 810
 811gfp_t kmalloc_fix_flags(gfp_t flags)
 812{
 813        gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
 814
 815        flags &= ~GFP_SLAB_BUG_MASK;
 816        pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
 817                        invalid_mask, &invalid_mask, flags, &flags);
 818        dump_stack();
 819
 820        return flags;
 821}
 822
 823/*
 824 * To avoid unnecessary overhead, we pass through large allocation requests
 825 * directly to the page allocator. We use __GFP_COMP, because we will need to
 826 * know the allocation order to free the pages properly in kfree.
 827 */
 828void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 829{
 830        void *ret = NULL;
 831        struct page *page;
 832
 833        if (unlikely(flags & GFP_SLAB_BUG_MASK))
 834                flags = kmalloc_fix_flags(flags);
 835
 836        flags |= __GFP_COMP;
 837        page = alloc_pages(flags, order);
 838        if (likely(page)) {
 839                ret = page_address(page);
 840                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
 841                                    PAGE_SIZE << order);
 842        }
 843        ret = kasan_kmalloc_large(ret, size, flags);
 844        /* As ret might get tagged, call kmemleak hook after KASAN. */
 845        kmemleak_alloc(ret, size, 1, flags);
 846        return ret;
 847}
 848EXPORT_SYMBOL(kmalloc_order);
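
/*
 * Worked example (added for illustration): under SLUB with 4K pages,
 * KMALLOC_MAX_CACHE_SIZE is two pages, so kmalloc(16384, GFP_KERNEL)
 * bypasses the kmalloc caches and reaches kmalloc_order() with
 * order = get_order(16384) = 2.  The __GFP_COMP compound page lets kfree()
 * recover that order from the page itself when freeing.
 */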
 849
 850#ifdef CONFIG_TRACING
 851void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 852{
 853        void *ret = kmalloc_order(size, flags, order);
 854        trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
 855        return ret;
 856}
 857EXPORT_SYMBOL(kmalloc_order_trace);
 858#endif
 859
 860#ifdef CONFIG_SLAB_FREELIST_RANDOM
 861/* Randomize a generic freelist */
 862static void freelist_randomize(struct rnd_state *state, unsigned int *list,
 863                               unsigned int count)
 864{
 865        unsigned int rand;
 866        unsigned int i;
 867
 868        for (i = 0; i < count; i++)
 869                list[i] = i;
 870
 871        /* Fisher-Yates shuffle */
 872        for (i = count - 1; i > 0; i--) {
 873                rand = prandom_u32_state(state);
 874                rand %= (i + 1);
 875                swap(list[i], list[rand]);
 876        }
 877}
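
/*
 * Illustration (added): for count == 4 the list starts as {0, 1, 2, 3}; the
 * loop swaps index 3 with a random index in [0, 3], then index 2 with one
 * in [0, 2], and finally index 1 with one in [0, 1], so every permutation
 * of the freelist order is equally likely given a uniform PRNG.
 */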
 878
 879/* Create a random sequence per cache */
 880int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
 881                                    gfp_t gfp)
 882{
 883        struct rnd_state state;
 884
 885        if (count < 2 || cachep->random_seq)
 886                return 0;
 887
 888        cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
 889        if (!cachep->random_seq)
 890                return -ENOMEM;
 891
 892        /* Get best entropy at this stage of boot */
 893        prandom_seed_state(&state, get_random_long());
 894
 895        freelist_randomize(&state, cachep->random_seq, count);
 896        return 0;
 897}
 898
 899/* Destroy the per-cache random freelist sequence */
 900void cache_random_seq_destroy(struct kmem_cache *cachep)
 901{
 902        kfree(cachep->random_seq);
 903        cachep->random_seq = NULL;
 904}
 905#endif /* CONFIG_SLAB_FREELIST_RANDOM */
 906
 907#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
 908#ifdef CONFIG_SLAB
 909#define SLABINFO_RIGHTS (0600)
 910#else
 911#define SLABINFO_RIGHTS (0400)
 912#endif
 913
 914static void print_slabinfo_header(struct seq_file *m)
 915{
 916        /*
 917         * Output format version, so at least we can change it
 918         * without _too_ many complaints.
 919         */
 920#ifdef CONFIG_DEBUG_SLAB
 921        seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
 922#else
 923        seq_puts(m, "slabinfo - version: 2.1\n");
 924#endif
 925        seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
 926        seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
 927        seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 928#ifdef CONFIG_DEBUG_SLAB
 929        seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
 930        seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 931#endif
 932        seq_putc(m, '\n');
 933}
 934
 935void *slab_start(struct seq_file *m, loff_t *pos)
 936{
 937        mutex_lock(&slab_mutex);
 938        return seq_list_start(&slab_caches, *pos);
 939}
 940
 941void *slab_next(struct seq_file *m, void *p, loff_t *pos)
 942{
 943        return seq_list_next(p, &slab_caches, pos);
 944}
 945
 946void slab_stop(struct seq_file *m, void *p)
 947{
 948        mutex_unlock(&slab_mutex);
 949}
 950
 951static void cache_show(struct kmem_cache *s, struct seq_file *m)
 952{
 953        struct slabinfo sinfo;
 954
 955        memset(&sinfo, 0, sizeof(sinfo));
 956        get_slabinfo(s, &sinfo);
 957
 958        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
 959                   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
 960                   sinfo.objects_per_slab, (1 << sinfo.cache_order));
 961
 962        seq_printf(m, " : tunables %4u %4u %4u",
 963                   sinfo.limit, sinfo.batchcount, sinfo.shared);
 964        seq_printf(m, " : slabdata %6lu %6lu %6lu",
 965                   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
 966        slabinfo_show_stats(m, s);
 967        seq_putc(m, '\n');
 968}
 969
 970static int slab_show(struct seq_file *m, void *p)
 971{
 972        struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
 973
 974        if (p == slab_caches.next)
 975                print_slabinfo_header(m);
 976        cache_show(s, m);
 977        return 0;
 978}
 979
 980void dump_unreclaimable_slab(void)
 981{
 982        struct kmem_cache *s;
 983        struct slabinfo sinfo;
 984
 985        /*
 986         * Acquiring slab_mutex here is risky since we don't want to
 987         * sleep in the OOM path, but traversing the list without holding
 988         * the mutex risks a crash. Use mutex_trylock to protect the
 989         * traversal and dump nothing if the mutex cannot be acquired.
 991         */
 992        if (!mutex_trylock(&slab_mutex)) {
 993                pr_warn("excessive unreclaimable slab but cannot dump stats\n");
 994                return;
 995        }
 996
 997        pr_info("Unreclaimable slab info:\n");
 998        pr_info("Name                      Used          Total\n");
 999
1000        list_for_each_entry(s, &slab_caches, list) {
1001                if (s->flags & SLAB_RECLAIM_ACCOUNT)
1002                        continue;
1003
1004                get_slabinfo(s, &sinfo);
1005
1006                if (sinfo.num_objs > 0)
1007                        pr_info("%-17s %10luKB %10luKB\n", s->name,
1008                                (sinfo.active_objs * s->size) / 1024,
1009                                (sinfo.num_objs * s->size) / 1024);
1010        }
1011        mutex_unlock(&slab_mutex);
1012}
1013
1014#if defined(CONFIG_MEMCG_KMEM)
1015int memcg_slab_show(struct seq_file *m, void *p)
1016{
1017        /*
1018         * Deprecated.
1019         * Please, take a look at tools/cgroup/slabinfo.py .
1020         */
1021        return 0;
1022}
1023#endif
1024
1025/*
1026 * slabinfo_op - iterator that generates /proc/slabinfo
1027 *
1028 * Output layout:
1029 * cache-name
1030 * num-active-objs
1031 * total-objs
1032 * object size
1033 * num-active-slabs
1034 * total-slabs
1035 * num-pages-per-slab
1036 * + further values on SMP and with statistics enabled
1037 */
1038static const struct seq_operations slabinfo_op = {
1039        .start = slab_start,
1040        .next = slab_next,
1041        .stop = slab_stop,
1042        .show = slab_show,
1043};
1044
1045static int slabinfo_open(struct inode *inode, struct file *file)
1046{
1047        return seq_open(file, &slabinfo_op);
1048}
1049
1050static const struct proc_ops slabinfo_proc_ops = {
1051        .proc_flags     = PROC_ENTRY_PERMANENT,
1052        .proc_open      = slabinfo_open,
1053        .proc_read      = seq_read,
1054        .proc_write     = slabinfo_write,
1055        .proc_lseek     = seq_lseek,
1056        .proc_release   = seq_release,
1057};
1058
1059static int __init slab_proc_init(void)
1060{
1061        proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
1062        return 0;
1063}
1064module_init(slab_proc_init);
1065
1066#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
1067
1068static __always_inline void *__do_krealloc(const void *p, size_t new_size,
1069                                           gfp_t flags)
1070{
1071        void *ret;
1072        size_t ks;
1073
1074        ks = ksize(p);
1075
1076        if (ks >= new_size) {
1077                p = kasan_krealloc((void *)p, new_size, flags);
1078                return (void *)p;
1079        }
1080
1081        ret = kmalloc_track_caller(new_size, flags);
1082        if (ret && p)
1083                memcpy(ret, p, ks);
1084
1085        return ret;
1086}
1087
1088/**
1089 * krealloc - reallocate memory. The contents will remain unchanged.
1090 * @p: object to reallocate memory for.
1091 * @new_size: how many bytes of memory are required.
1092 * @flags: the type of memory to allocate.
1093 *
1094 * The contents of the object pointed to are preserved up to the
1095 * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
1096 * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
1097 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
1098 *
1099 * Return: pointer to the allocated memory or %NULL in case of error
1100 */
1101void *krealloc(const void *p, size_t new_size, gfp_t flags)
1102{
1103        void *ret;
1104
1105        if (unlikely(!new_size)) {
1106                kfree(p);
1107                return ZERO_SIZE_PTR;
1108        }
1109
1110        ret = __do_krealloc(p, new_size, flags);
1111        if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
1112                kfree(p);
1113
1114        return ret;
1115}
1116EXPORT_SYMBOL(krealloc);
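
/*
 * Example (illustrative sketch, hypothetical helper, added for
 * illustration): growing a buffer with krealloc().  Assigning to a
 * temporary first avoids leaking the old buffer on failure, because
 * krealloc() returns NULL but leaves the original allocation untouched in
 * that case.
 */
static int example_grow_buffer(char **bufp, size_t new_size)
{
        char *tmp = krealloc(*bufp, new_size, GFP_KERNEL);

        if (!tmp)
                return -ENOMEM; /* *bufp is still valid and owned by the caller */

        *bufp = tmp;
        return 0;
}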
1117
1118/**
1119 * kfree_sensitive - Clear sensitive information in memory before freeing
1120 * @p: object to free memory of
1121 *
1122 * The memory of the object @p points to is zeroed before it is freed.
1123 * If @p is %NULL, kfree_sensitive() does nothing.
1124 *
1125 * Note: this function zeroes the whole allocated buffer which can be a good
1126 * deal bigger than the requested buffer size passed to kmalloc(). So be
1127 * careful when using this function in performance sensitive code.
1128 */
1129void kfree_sensitive(const void *p)
1130{
1131        size_t ks;
1132        void *mem = (void *)p;
1133
1134        ks = ksize(mem);
1135        if (ks)
1136                memzero_explicit(mem, ks);
1137        kfree(mem);
1138}
1139EXPORT_SYMBOL(kfree_sensitive);
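
/*
 * Example (illustrative sketch, hypothetical helper, added for
 * illustration): kfree_sensitive() is the usual way to drop buffers holding
 * key material, so the bytes do not linger in the freed slab object.
 */
static int example_use_key(const u8 *src, size_t len)
{
        u8 *key = kmemdup(src, len, GFP_KERNEL);

        if (!key)
                return -ENOMEM;

        /* ... feed the key to a cipher ... */

        /* Zeroes the whole underlying allocation (ksize() bytes), then frees it. */
        kfree_sensitive(key);
        return 0;
}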
1140
1141/**
1142 * ksize - get the actual amount of memory allocated for a given object
1143 * @objp: Pointer to the object
1144 *
1145 * kmalloc may internally round up allocations and return more memory
1146 * than requested. ksize() can be used to determine the actual amount of
1147 * memory allocated. The caller may use this additional memory, even though
1148 * a smaller amount of memory was initially specified with the kmalloc call.
1149 * The caller must guarantee that objp points to a valid object previously
1150 * allocated with either kmalloc() or kmem_cache_alloc(). The object
1151 * must not be freed during the duration of the call.
1152 *
1153 * Return: size of the actual memory used by @objp in bytes
1154 */
1155size_t ksize(const void *objp)
1156{
1157        size_t size;
1158
1159        /*
1160         * We need to check that the pointed to object is valid, and only then
1161         * unpoison the shadow memory below. We use __kasan_check_read(), to
1162         * generate a more useful report at the time ksize() is called (rather
1163         * than later where behaviour is undefined due to potential
1164         * use-after-free or double-free).
1165         *
1166         * If the pointed to memory is invalid we return 0, to avoid users of
1167         * ksize() writing to and potentially corrupting the memory region.
1168         *
1169         * We want to perform the check before __ksize(), to avoid potentially
1170         * crashing in __ksize() due to accessing invalid metadata.
1171         */
1172        if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
1173                return 0;
1174
1175        size = __ksize(objp);
1176        /*
1177         * We assume that ksize callers could use the whole allocated area,
1178         * so we need to unpoison this area.
1179         */
1180        kasan_unpoison_range(objp, size);
1181        return size;
1182}
1183EXPORT_SYMBOL(ksize);
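
/*
 * Example (illustrative sketch, added for illustration): because kmalloc()
 * rounds requests up to the next cache size, a caller may use ksize() to
 * exploit the slack instead of reallocating.  kmalloc(100, GFP_KERNEL) is
 * typically served from kmalloc-128, so ksize() on the result usually
 * reports 128 usable bytes.
 */
static size_t example_usable_bytes(void)
{
        size_t usable = 0;
        void *p = kmalloc(100, GFP_KERNEL);

        if (p) {
                usable = ksize(p);      /* typically 128 here */
                kfree(p);
        }
        return usable;
}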
1184
1185/* Tracepoints definitions. */
1186EXPORT_TRACEPOINT_SYMBOL(kmalloc);
1187EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
1188EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
1189EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
1190EXPORT_TRACEPOINT_SYMBOL(kfree);
1191EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
1192
1193int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
1194{
1195        if (__should_failslab(s, gfpflags))
1196                return -ENOMEM;
1197        return 0;
1198}
1199ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
1200