// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
                         size_t byte)
{
        const int nr = pool->curr_nr;
        const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
        const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
        int i;

        pr_err("BUG: mempool element poison mismatch\n");
        pr_err("Mempool %p size %zu\n", pool, size);
        pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
        for (i = start; i < end; i++)
                pr_cont("%x ", *(u8 *)(element + i));
        pr_cont("%s\n", end < size ? "..." : "");
        dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
        u8 *obj = element;
        size_t i;

        for (i = 0; i < size; i++) {
                u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

                if (obj[i] != exp) {
                        poison_error(pool, element, size, i);
                        return;
                }
        }
        memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
        /* Mempools backed by slab allocator */
        if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
                __check_element(pool, element, ksize(element));

        /* Mempools backed by page allocator */
        if (pool->free == mempool_free_pages) {
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_atomic((struct page *)element);

                __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
                kunmap_atomic(addr);
        }
}

static void __poison_element(void *element, size_t size)
{
        u8 *obj = element;

        memset(obj, POISON_FREE, size - 1);
        obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
        /* Mempools backed by slab allocator */
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                __poison_element(element, ksize(element));

        /* Mempools backed by page allocator */
        if (pool->alloc == mempool_alloc_pages) {
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_atomic((struct page *)element);

                __poison_element(addr, 1UL << (PAGE_SHIFT + order));
                kunmap_atomic(addr);
        }
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */

static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                kasan_poison_kfree(element, _RET_IP_);
        if (pool->alloc == mempool_alloc_pages)
                kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                kasan_unpoison_slab(element);
        if (pool->alloc == mempool_alloc_pages)
                kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

static __always_inline void add_element(mempool_t *pool, void *element)
{
        BUG_ON(pool->curr_nr >= pool->min_nr);
        poison_element(pool, element);
        kasan_poison_element(pool, element);
        pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
        void *element = pool->elements[--pool->curr_nr];

        BUG_ON(pool->curr_nr < 0);
        kasan_unpoison_element(pool, element);
        check_element(pool, element);
        return element;
}

/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool:      pointer to the memory pool which was initialized with
 *             mempool_init().
 *
 * Free all reserved elements in @pool.  @pool itself is not freed here;
 * it is typically embedded in another structure or released by the
 * caller.  This function only sleeps if the free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
        while (pool->curr_nr) {
                void *element = remove_element(pool);
                pool->free(element, pool->pool_data);
        }
        kfree(pool->elements);
        pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
        if (unlikely(!pool))
                return;

        mempool_exit(pool);
        kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
                      mempool_free_t *free_fn, void *pool_data,
                      gfp_t gfp_mask, int node_id)
{
        spin_lock_init(&pool->lock);
        pool->min_nr    = min_nr;
        pool->pool_data = pool_data;
        pool->alloc     = alloc_fn;
        pool->free      = free_fn;
        init_waitqueue_head(&pool->wait);

        pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
                                            gfp_mask, node_id);
        if (!pool->elements)
                return -ENOMEM;

        /*
         * First pre-allocate the guaranteed number of buffers.
         */
        while (pool->curr_nr < pool->min_nr) {
                void *element;

                element = pool->alloc(gfp_mask, pool->pool_data);
                if (unlikely(!element)) {
                        mempool_exit(pool);
                        return -ENOMEM;
                }
                add_element(pool, element);
        }

        return 0;
}
EXPORT_SYMBOL(mempool_init_node);

/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. one
 * embedded in another structure).
 */
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
                 mempool_free_t *free_fn, void *pool_data)
{
        return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
                                 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init);
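
/*
 * Example (an illustrative sketch, not taken from any driver): a user
 * will usually embed the mempool in its own object and pair
 * mempool_init() with mempool_exit().  "my_dev", "my_dev_setup" and
 * MY_MIN_NR below are hypothetical names.
 *
 *	struct my_dev {
 *		mempool_t reserve;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *dev)
 *	{
 *		return mempool_init(&dev->reserve, MY_MIN_NR,
 *				    mempool_alloc_pages, mempool_free_pages,
 *				    (void *)(long)0);
 *	}
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		mempool_exit(&dev->reserve);
 *	}
 */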

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
                          mempool_free_t *free_fn, void *pool_data)
{
        return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
                                   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
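
/*
 * Example (an illustrative sketch; "my_io", "my_io_cache" and
 * "my_io_pool" are hypothetical names): a pool of objects backed by a
 * dedicated kmem_cache, using the mempool_alloc_slab()/mempool_free_slab()
 * helpers defined at the bottom of this file.
 *
 *	static struct kmem_cache *my_io_cache;
 *	static mempool_t *my_io_pool;
 *
 *	static int my_init(void)
 *	{
 *		my_io_cache = KMEM_CACHE(my_io, 0);
 *		if (!my_io_cache)
 *			return -ENOMEM;
 *		my_io_pool = mempool_create(16, mempool_alloc_slab,
 *					    mempool_free_slab, my_io_cache);
 *		if (!my_io_pool) {
 *			kmem_cache_destroy(my_io_cache);
 *			return -ENOMEM;
 *		}
 *		return 0;
 *	}
 *
 *	static void my_exit(void)
 *	{
 *		mempool_destroy(my_io_pool);
 *		kmem_cache_destroy(my_io_cache);
 *	}
 */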

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
                               mempool_free_t *free_fn, void *pool_data,
                               gfp_t gfp_mask, int node_id)
{
        mempool_t *pool;

        pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
        if (!pool)
                return NULL;

        if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
                              gfp_mask, node_id)) {
                kfree(pool);
                return NULL;
        }

        return pool;
}
EXPORT_SYMBOL(mempool_create_node);

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note that the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
        void *element;
        void **new_elements;
        unsigned long flags;

        BUG_ON(new_min_nr <= 0);
        might_sleep();

        spin_lock_irqsave(&pool->lock, flags);
        if (new_min_nr <= pool->min_nr) {
                while (new_min_nr < pool->curr_nr) {
                        element = remove_element(pool);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);
                        spin_lock_irqsave(&pool->lock, flags);
                }
                pool->min_nr = new_min_nr;
                goto out_unlock;
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        /* Grow the pool */
        new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
                                     GFP_KERNEL);
        if (!new_elements)
                return -ENOMEM;

        spin_lock_irqsave(&pool->lock, flags);
        if (unlikely(new_min_nr <= pool->min_nr)) {
                /* Raced, other resize will do our work */
                spin_unlock_irqrestore(&pool->lock, flags);
                kfree(new_elements);
                goto out;
        }
        memcpy(new_elements, pool->elements,
                        pool->curr_nr * sizeof(*new_elements));
        kfree(pool->elements);
        pool->elements = new_elements;
        pool->min_nr = new_min_nr;

        while (pool->curr_nr < pool->min_nr) {
                spin_unlock_irqrestore(&pool->lock, flags);
                element = pool->alloc(GFP_KERNEL, pool->pool_data);
                if (!element)
                        goto out;
                spin_lock_irqsave(&pool->lock, flags);
                if (pool->curr_nr < pool->min_nr) {
                        add_element(pool, element);
                } else {
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);   /* Raced */
                        goto out;
                }
        }
out_unlock:
        spin_unlock_irqrestore(&pool->lock, flags);
out:
        return 0;
}
EXPORT_SYMBOL(mempool_resize);
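
/*
 * Example (an illustrative sketch; "my_io_pool" and "nr_devs" are
 * hypothetical): a driver whose worst-case number of in-flight requests
 * grows as devices are added might enlarge its reserve accordingly.
 * A failed grow is not fatal; the pool simply keeps its old size.
 *
 *	if (mempool_resize(my_io_pool, 16 * nr_devs))
 *		pr_warn("my_io: could not grow reserve\n");
 */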

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
        void *element;
        unsigned long flags;
        wait_queue_entry_t wait;
        gfp_t gfp_temp;

        VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
        might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

        gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
        gfp_mask |= __GFP_NORETRY;      /* don't loop in __alloc_pages */
        gfp_mask |= __GFP_NOWARN;       /* failures are OK */

        gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

        element = pool->alloc(gfp_temp, pool->pool_data);
        if (likely(element != NULL))
                return element;

        spin_lock_irqsave(&pool->lock, flags);
        if (likely(pool->curr_nr)) {
                element = remove_element(pool);
                spin_unlock_irqrestore(&pool->lock, flags);
                /* paired with rmb in mempool_free(), read comment there */
                smp_wmb();
                /*
                 * Update the allocation stack trace as this is more useful
                 * for debugging.
                 */
                kmemleak_update_trace(element);
                return element;
        }

        /*
         * We use gfp mask w/o direct reclaim or IO for the first round.  If
         * alloc failed with that and @pool was empty, retry immediately.
         */
        if (gfp_temp != gfp_mask) {
                spin_unlock_irqrestore(&pool->lock, flags);
                gfp_temp = gfp_mask;
                goto repeat_alloc;
        }

        /* We must not sleep if !__GFP_DIRECT_RECLAIM */
        if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
                spin_unlock_irqrestore(&pool->lock, flags);
                return NULL;
        }

        /* Let's wait for someone else to return an element to @pool */
        init_wait(&wait);
        prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

        spin_unlock_irqrestore(&pool->lock, flags);

        /*
         * FIXME: this should be io_schedule().  The timeout is there as a
         * workaround for some DM problems in 2.6.18.
         */
        io_schedule_timeout(5*HZ);

        finish_wait(&pool->wait, &wait);
        goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
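
/*
 * Example (an illustrative sketch; "my_io_pool" is hypothetical): the
 * usual round trip.  With a gfp_mask that allows direct reclaim (e.g.
 * GFP_KERNEL or GFP_NOIO) the allocation below never returns NULL: once
 * the reserve is empty, mempool_alloc() waits for a mempool_free().
 *
 *	struct my_io *io = mempool_alloc(my_io_pool, GFP_NOIO);
 *
 *	... submit the request; then, in the completion path: ...
 *
 *	mempool_free(io, my_io_pool);
 */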

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
        unsigned long flags;

        if (unlikely(element == NULL))
                return;

        /*
         * Paired with the wmb in mempool_alloc().  The preceding read is
         * for @element and the following @pool->curr_nr.  This ensures
         * that the visible value of @pool->curr_nr is from after the
         * allocation of @element.  This is necessary for fringe cases
         * where @element was passed to this task without going through
         * barriers.
         *
         * For example, assume @p is %NULL at the beginning and one task
         * performs "p = mempool_alloc(...);" while another task is doing
         * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
         * may end up using curr_nr value which is from before allocation
         * of @p without the following rmb.
         */
        smp_rmb();

        /*
         * For correctness, we need a test which is guaranteed to trigger
         * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
         * without locking achieves that and refilling as soon as possible
         * is desirable.
         *
         * Because curr_nr visible here is always a value after the
         * allocation of @element, any task which decremented curr_nr below
         * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
         * incremented to min_nr afterwards.  If curr_nr gets incremented
         * to min_nr after the allocation of @element, the elements
         * allocated after that are subject to the same guarantee.
         *
         * Waiters happen iff curr_nr is 0 and the above guarantee also
         * ensures that there will be frees which return elements to the
         * pool waking up the waiters.
         */
        if (unlikely(pool->curr_nr < pool->min_nr)) {
                spin_lock_irqsave(&pool->lock, flags);
                if (likely(pool->curr_nr < pool->min_nr)) {
                        add_element(pool, element);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        wake_up(&pool->wait);
                        return;
                }
                spin_unlock_irqrestore(&pool->lock, flags);
        }
        pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
        struct kmem_cache *mem = pool_data;

        VM_BUG_ON(mem->ctor);
        return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
        struct kmem_cache *mem = pool_data;

        kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used pair of alloc and free functions that kmalloc()/kfree()
 * the amount of memory specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
        size_t size = (size_t)pool_data;

        return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
        kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
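
/*
 * Example (an illustrative sketch; "buf_pool" is hypothetical): a reserve
 * of 512-byte kmalloc buffers.  The buffer size travels through
 * @pool_data as a cast integer.  <linux/mempool.h> also provides the
 * mempool_create_kmalloc_pool() wrapper for exactly this pairing.
 *
 *	mempool_t *buf_pool;
 *
 *	buf_pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
 *				  (void *)(size_t)512);
 */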

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
        int order = (int)(long)pool_data;

        return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
        int order = (int)(long)pool_data;

        __free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
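
/*
 * Example (an illustrative sketch; "pg_pool" is hypothetical): a reserve
 * of order-1 (two-page) allocations.  The order travels through
 * @pool_data; <linux/mempool.h> provides the mempool_create_page_pool()
 * wrapper for this pairing.
 *
 *	mempool_t *pg_pool;
 *
 *	pg_pool = mempool_create(4, mempool_alloc_pages,
 *				 mempool_free_pages, (void *)(long)1);
 */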