// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
                         size_t byte)
{
        const int nr = pool->curr_nr;
        const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
        const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
        int i;

        pr_err("BUG: mempool element poison mismatch\n");
        pr_err("Mempool %p size %zu\n", pool, size);
        pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
        for (i = start; i < end; i++)
                pr_cont("%x ", *(u8 *)(element + i));
        pr_cont("%s\n", end < size ? "..." : "");
        dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
        u8 *obj = element;
        size_t i;

        for (i = 0; i < size; i++) {
                u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

                if (obj[i] != exp) {
                        poison_error(pool, element, size, i);
                        return;
                }
        }
        memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
        /* Mempools backed by slab allocator */
        if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
                __check_element(pool, element, ksize(element));

        /* Mempools backed by page allocator */
        if (pool->free == mempool_free_pages) {
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_atomic((struct page *)element);

                __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
                kunmap_atomic(addr);
        }
}

static void __poison_element(void *element, size_t size)
{
        u8 *obj = element;

        memset(obj, POISON_FREE, size - 1);
        obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
        /* Mempools backed by slab allocator */
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                __poison_element(element, ksize(element));

        /* Mempools backed by page allocator */
        if (pool->alloc == mempool_alloc_pages) {
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_atomic((struct page *)element);

                __poison_element(addr, 1UL << (PAGE_SHIFT + order));
                kunmap_atomic(addr);
        }
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */

static void kasan_poison_element(mempool_t *pool, void *element)
{
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                kasan_poison_kfree(element);
        if (pool->alloc == mempool_alloc_pages)
                kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
{
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                kasan_unpoison_slab(element);
        if (pool->alloc == mempool_alloc_pages)
                kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

static void add_element(mempool_t *pool, void *element)
{
        BUG_ON(pool->curr_nr >= pool->min_nr);
        poison_element(pool, element);
        kasan_poison_element(pool, element);
        pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool, gfp_t flags)
{
        void *element = pool->elements[--pool->curr_nr];

        BUG_ON(pool->curr_nr < 0);
        kasan_unpoison_element(pool, element, flags);
        check_element(pool, element);
        return element;
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
        if (unlikely(!pool))
                return;

        while (pool->curr_nr) {
                void *element = remove_element(pool, GFP_KERNEL);
                pool->free(element, pool->pool_data);
        }
        kfree(pool->elements);
        kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
                                mempool_free_t *free_fn, void *pool_data)
{
        return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
                                   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
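
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller pairs mempool_create() with mempool_destroy().  The kmem_cache
 * argument is hypothetical and would normally come from kmem_cache_create();
 * note that caches with a constructor cannot back a mempool (see the
 * VM_BUG_ON in mempool_alloc_slab below).
 */
static mempool_t *example_create_slab_pool(struct kmem_cache *cachep)
{
        /* Reserve 16 objects so allocations can always make progress. */
        return mempool_create(16, mempool_alloc_slab, mempool_free_slab,
                              cachep);
}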

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
                               mempool_free_t *free_fn, void *pool_data,
                               gfp_t gfp_mask, int node_id)
{
        mempool_t *pool;

        pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
        if (!pool)
                return NULL;
        pool->elements = kmalloc_node(min_nr * sizeof(void *),
                                      gfp_mask, node_id);
        if (!pool->elements) {
                kfree(pool);
                return NULL;
        }
        spin_lock_init(&pool->lock);
        pool->min_nr = min_nr;
        pool->pool_data = pool_data;
        init_waitqueue_head(&pool->wait);
        pool->alloc = alloc_fn;
        pool->free = free_fn;

        /*
         * First pre-allocate the guaranteed number of buffers.
         */
        while (pool->curr_nr < pool->min_nr) {
                void *element;

                element = pool->alloc(gfp_mask, pool->pool_data);
                if (unlikely(!element)) {
                        mempool_destroy(pool);
                        return NULL;
                }
                add_element(pool, element);
        }
        return pool;
}
EXPORT_SYMBOL(mempool_create_node);
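
/*
 * Example (illustrative sketch, hypothetical caller): pools whose elements
 * should live on a particular NUMA node can use mempool_create_node()
 * directly; the node id would typically come from dev_to_node() or similar.
 */
static mempool_t *example_create_pool_on_node(struct kmem_cache *cachep,
                                              int nid)
{
        return mempool_create_node(16, mempool_alloc_slab, mempool_free_slab,
                                   cachep, GFP_KERNEL, nid);
}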

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note that the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
        void *element;
        void **new_elements;
        unsigned long flags;

        BUG_ON(new_min_nr <= 0);
        might_sleep();

        spin_lock_irqsave(&pool->lock, flags);
        if (new_min_nr <= pool->min_nr) {
                while (new_min_nr < pool->curr_nr) {
                        element = remove_element(pool, GFP_KERNEL);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);
                        spin_lock_irqsave(&pool->lock, flags);
                }
                pool->min_nr = new_min_nr;
                goto out_unlock;
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        /* Grow the pool */
        new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
                                     GFP_KERNEL);
        if (!new_elements)
                return -ENOMEM;

        spin_lock_irqsave(&pool->lock, flags);
        if (unlikely(new_min_nr <= pool->min_nr)) {
                /* Raced, other resize will do our work */
                spin_unlock_irqrestore(&pool->lock, flags);
                kfree(new_elements);
                goto out;
        }
        memcpy(new_elements, pool->elements,
                        pool->curr_nr * sizeof(*new_elements));
        kfree(pool->elements);
        pool->elements = new_elements;
        pool->min_nr = new_min_nr;

        while (pool->curr_nr < pool->min_nr) {
                spin_unlock_irqrestore(&pool->lock, flags);
                element = pool->alloc(GFP_KERNEL, pool->pool_data);
                if (!element)
                        goto out;
                spin_lock_irqsave(&pool->lock, flags);
                if (pool->curr_nr < pool->min_nr) {
                        add_element(pool, element);
                } else {
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);   /* Raced */
                        goto out;
                }
        }
out_unlock:
        spin_unlock_irqrestore(&pool->lock, flags);
out:
        return 0;
}
EXPORT_SYMBOL(mempool_resize);
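
/*
 * Example (illustrative sketch, hypothetical caller): a driver that scales
 * its reserve with queue depth.  A return of 0 only means min_nr was
 * updated; when growing, later mempool_free() calls finish the refill.
 */
static int example_scale_pool(mempool_t *pool, unsigned int queue_depth)
{
        /* Keep at least one reserved element per in-flight request. */
        return mempool_resize(pool, max_t(int, queue_depth, 1));
}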

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
        void *element;
        unsigned long flags;
        wait_queue_entry_t wait;
        gfp_t gfp_temp;

        VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
        might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

        gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
        gfp_mask |= __GFP_NORETRY;      /* don't loop in __alloc_pages */
        gfp_mask |= __GFP_NOWARN;       /* failures are OK */

        gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

        element = pool->alloc(gfp_temp, pool->pool_data);
        if (likely(element != NULL))
                return element;

        spin_lock_irqsave(&pool->lock, flags);
        if (likely(pool->curr_nr)) {
                element = remove_element(pool, gfp_temp);
                spin_unlock_irqrestore(&pool->lock, flags);
                /* paired with rmb in mempool_free(), read comment there */
                smp_wmb();
                /*
                 * Update the allocation stack trace as this is more useful
                 * for debugging.
                 */
                kmemleak_update_trace(element);
                return element;
        }

        /*
         * We use gfp mask w/o direct reclaim or IO for the first round.  If
         * alloc failed with that and @pool was empty, retry immediately.
         */
        if (gfp_temp != gfp_mask) {
                spin_unlock_irqrestore(&pool->lock, flags);
                gfp_temp = gfp_mask;
                goto repeat_alloc;
        }

        /* We must not sleep if !__GFP_DIRECT_RECLAIM */
        if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
                spin_unlock_irqrestore(&pool->lock, flags);
                return NULL;
        }

        /* Let's wait for someone else to return an element to @pool */
        init_wait(&wait);
        prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

        spin_unlock_irqrestore(&pool->lock, flags);

        /*
         * FIXME: this should be io_schedule().  The timeout is there as a
         * workaround for some DM problems in 2.6.18.
         */
        io_schedule_timeout(5*HZ);

        finish_wait(&pool->wait, &wait);
        goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
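
/*
 * Example (illustrative sketch, hypothetical caller): an I/O submission
 * path typically passes GFP_NOIO so that reclaim cannot recurse into the
 * same I/O stack.  Because GFP_NOIO includes __GFP_DIRECT_RECLAIM, this
 * call sleeps rather than fails, so the return value is never NULL here.
 */
static void *example_alloc_io_element(mempool_t *pool)
{
        return mempool_alloc(pool, GFP_NOIO);
}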

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
        unsigned long flags;

        if (unlikely(element == NULL))
                return;

        /*
         * Paired with the wmb in mempool_alloc().  The preceding read is
         * for @element and the following @pool->curr_nr.  This ensures
         * that the visible value of @pool->curr_nr is from after the
         * allocation of @element.  This is necessary for fringe cases
         * where @element was passed to this task without going through
         * barriers.
         *
         * For example, assume @p is %NULL at the beginning and one task
         * performs "p = mempool_alloc(...);" while another task is doing
         * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
         * may end up using curr_nr value which is from before allocation
         * of @p without the following rmb.
         */
        smp_rmb();

        /*
         * For correctness, we need a test which is guaranteed to trigger
         * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
         * without locking achieves that and refilling as soon as possible
         * is desirable.
         *
         * Because curr_nr visible here is always a value after the
         * allocation of @element, any task which decremented curr_nr below
         * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
         * incremented to min_nr afterwards.  If curr_nr gets incremented
         * to min_nr after the allocation of @element, the elements
         * allocated after that are subject to the same guarantee.
         *
         * Waiters happen iff curr_nr is 0 and the above guarantee also
         * ensures that there will be frees which return elements to the
         * pool waking up the waiters.
         */
        if (unlikely(pool->curr_nr < pool->min_nr)) {
                spin_lock_irqsave(&pool->lock, flags);
                if (likely(pool->curr_nr < pool->min_nr)) {
                        add_element(pool, element);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        wake_up(&pool->wait);
                        return;
                }
                spin_unlock_irqrestore(&pool->lock, flags);
        }
        pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
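
/*
 * Example (illustrative sketch, hypothetical caller): returning an element
 * from a completion handler.  This is safe in IRQ context as long as the
 * pool's free_fn does not sleep, because the pool lock is only ever taken
 * with spin_lock_irqsave().
 */
static void example_complete_io(void *element, mempool_t *pool)
{
        /* Refills the reserve first, waking any mempool_alloc() waiters. */
        mempool_free(element, pool);
}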

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
        struct kmem_cache *mem = pool_data;
        VM_BUG_ON(mem->ctor);
        return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
        struct kmem_cache *mem = pool_data;
        kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
        size_t size = (size_t)pool_data;
        return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
        kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
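
/*
 * Example (illustrative sketch): for kmalloc-backed pools the element size
 * travels through @pool_data as an integer cast to a pointer; 256 bytes is
 * an arbitrary example size.
 */
static mempool_t *example_create_kmalloc_pool(void)
{
        return mempool_create(8, mempool_kmalloc, mempool_kfree,
                              (void *)(unsigned long)256);
}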

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
        int order = (int)(long)pool_data;
        return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
        int order = (int)(long)pool_data;
        __free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);

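/*
 * Example (illustrative sketch): for page-backed pools @pool_data carries
 * the allocation order, again as an integer cast to a pointer.  Order 1
 * (two contiguous pages per element) is an arbitrary choice.
 */
static mempool_t *example_create_page_pool(void)
{
        return mempool_create(4, mempool_alloc_pages, mempool_free_pages,
                              (void *)(long)1);
}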