linux/mm/mempool.c
<<
>>
Prefs
   1/*
   2 *  linux/mm/mempool.c
   3 *
   4 *  memory buffer pool support. Such pools are mostly used
   5 *  for guaranteed, deadlock-free memory allocations during
   6 *  extreme VM load.
   7 *
   8 *  started by Ingo Molnar, Copyright (C) 2001
   9 *  debugging by David Rientjes, Copyright (C) 2015
  10 */
  11
  12#include <linux/mm.h>
  13#include <linux/slab.h>
  14#include <linux/highmem.h>
  15#include <linux/kasan.h>
  16#include <linux/kmemleak.h>
  17#include <linux/export.h>
  18#include <linux/mempool.h>
  19#include <linux/blkdev.h>
  20#include <linux/writeback.h>
  21#include "slab.h"
  22
  23#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
  24static void poison_error(mempool_t *pool, void *element, size_t size,
  25                         size_t byte)
  26{
  27        const int nr = pool->curr_nr;
  28        const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
  29        const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
  30        int i;
  31
  32        pr_err("BUG: mempool element poison mismatch\n");
  33        pr_err("Mempool %p size %zu\n", pool, size);
  34        pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
  35        for (i = start; i < end; i++)
  36                pr_cont("%x ", *(u8 *)(element + i));
  37        pr_cont("%s\n", end < size ? "..." : "");
  38        dump_stack();
  39}
  40
  41static void __check_element(mempool_t *pool, void *element, size_t size)
  42{
  43        u8 *obj = element;
  44        size_t i;
  45
  46        for (i = 0; i < size; i++) {
  47                u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;
  48
  49                if (obj[i] != exp) {
  50                        poison_error(pool, element, size, i);
  51                        return;
  52                }
  53        }
  54        memset(obj, POISON_INUSE, size);
  55}
  56
  57static void check_element(mempool_t *pool, void *element)
  58{
  59        /* Mempools backed by slab allocator */
  60        if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
  61                __check_element(pool, element, ksize(element));
  62
  63        /* Mempools backed by page allocator */
  64        if (pool->free == mempool_free_pages) {
  65                int order = (int)(long)pool->pool_data;
  66                void *addr = kmap_atomic((struct page *)element);
  67
  68                __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
  69                kunmap_atomic(addr);
  70        }
  71}
  72
  73static void __poison_element(void *element, size_t size)
  74{
  75        u8 *obj = element;
  76
  77        memset(obj, POISON_FREE, size - 1);
  78        obj[size - 1] = POISON_END;
  79}
  80
  81static void poison_element(mempool_t *pool, void *element)
  82{
  83        /* Mempools backed by slab allocator */
  84        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
  85                __poison_element(element, ksize(element));
  86
  87        /* Mempools backed by page allocator */
  88        if (pool->alloc == mempool_alloc_pages) {
  89                int order = (int)(long)pool->pool_data;
  90                void *addr = kmap_atomic((struct page *)element);
  91
  92                __poison_element(addr, 1UL << (PAGE_SHIFT + order));
  93                kunmap_atomic(addr);
  94        }
  95}
  96#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
  97static inline void check_element(mempool_t *pool, void *element)
  98{
  99}
 100static inline void poison_element(mempool_t *pool, void *element)
 101{
 102}
 103#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
 104
 105static void kasan_poison_element(mempool_t *pool, void *element)
 106{
 107        if (pool->alloc == mempool_alloc_slab)
 108                kasan_slab_free(pool->pool_data, element);
 109        if (pool->alloc == mempool_kmalloc)
 110                kasan_kfree(element);
 111        if (pool->alloc == mempool_alloc_pages)
 112                kasan_free_pages(element, (unsigned long)pool->pool_data);
 113}
 114
 115static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
 116{
 117        if (pool->alloc == mempool_alloc_slab)
 118                kasan_slab_alloc(pool->pool_data, element, flags);
 119        if (pool->alloc == mempool_kmalloc)
 120                kasan_krealloc(element, (size_t)pool->pool_data, flags);
 121        if (pool->alloc == mempool_alloc_pages)
 122                kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 123}
 124
 125static void add_element(mempool_t *pool, void *element)
 126{
 127        BUG_ON(pool->curr_nr >= pool->min_nr);
 128        poison_element(pool, element);
 129        kasan_poison_element(pool, element);
 130        pool->elements[pool->curr_nr++] = element;
 131}
 132
 133static void *remove_element(mempool_t *pool, gfp_t flags)
 134{
 135        void *element = pool->elements[--pool->curr_nr];
 136
 137        BUG_ON(pool->curr_nr < 0);
 138        kasan_unpoison_element(pool, element, flags);
 139        check_element(pool, element);
 140        return element;
 141}
 142
 143/**
 144 * mempool_destroy - deallocate a memory pool
 145 * @pool:      pointer to the memory pool which was allocated via
 146 *             mempool_create().
 147 *
 148 * Free all reserved elements in @pool and @pool itself.  This function
 149 * only sleeps if the free_fn() function sleeps.
 150 */
 151void mempool_destroy(mempool_t *pool)
 152{
 153        if (unlikely(!pool))
 154                return;
 155
 156        while (pool->curr_nr) {
 157                void *element = remove_element(pool, GFP_KERNEL);
 158                pool->free(element, pool->pool_data);
 159        }
 160        kfree(pool->elements);
 161        kfree(pool);
 162}
 163EXPORT_SYMBOL(mempool_destroy);
 164
 165/**
 166 * mempool_create - create a memory pool
 167 * @min_nr:    the minimum number of elements guaranteed to be
 168 *             allocated for this pool.
 169 * @alloc_fn:  user-defined element-allocation function.
 170 * @free_fn:   user-defined element-freeing function.
 171 * @pool_data: optional private data available to the user-defined functions.
 172 *
 173 * this function creates and allocates a guaranteed size, preallocated
 174 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 175 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 176 * functions might sleep - as long as the mempool_alloc() function is not called
 177 * from IRQ contexts.
 178 */
 179mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 180                                mempool_free_t *free_fn, void *pool_data)
 181{
 182        return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,
 183                                   GFP_KERNEL, NUMA_NO_NODE);
 184}
 185EXPORT_SYMBOL(mempool_create);
 186
 187mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
 188                               mempool_free_t *free_fn, void *pool_data,
 189                               gfp_t gfp_mask, int node_id)
 190{
 191        mempool_t *pool;
 192        pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
 193        if (!pool)
 194                return NULL;
 195        pool->elements = kmalloc_node(min_nr * sizeof(void *),
 196                                      gfp_mask, node_id);
 197        if (!pool->elements) {
 198                kfree(pool);
 199                return NULL;
 200        }
 201        spin_lock_init(&pool->lock);
 202        pool->min_nr = min_nr;
 203        pool->pool_data = pool_data;
 204        init_waitqueue_head(&pool->wait);
 205        pool->alloc = alloc_fn;
 206        pool->free = free_fn;
 207
 208        /*
 209         * First pre-allocate the guaranteed number of buffers.
 210         */
 211        while (pool->curr_nr < pool->min_nr) {
 212                void *element;
 213
 214                element = pool->alloc(gfp_mask, pool->pool_data);
 215                if (unlikely(!element)) {
 216                        mempool_destroy(pool);
 217                        return NULL;
 218                }
 219                add_element(pool, element);
 220        }
 221        return pool;
 222}
 223EXPORT_SYMBOL(mempool_create_node);
 224
 225/**
 226 * mempool_resize - resize an existing memory pool
 227 * @pool:       pointer to the memory pool which was allocated via
 228 *              mempool_create().
 229 * @new_min_nr: the new minimum number of elements guaranteed to be
 230 *              allocated for this pool.
 231 *
 232 * This function shrinks/grows the pool. In the case of growing,
 233 * it cannot be guaranteed that the pool will be grown to the new
 234 * size immediately, but new mempool_free() calls will refill it.
 235 * This function may sleep.
 236 *
 237 * Note, the caller must guarantee that no mempool_destroy is called
 238 * while this function is running. mempool_alloc() & mempool_free()
 239 * might be called (eg. from IRQ contexts) while this function executes.
 240 */
 241int mempool_resize(mempool_t *pool, int new_min_nr)
 242{
 243        void *element;
 244        void **new_elements;
 245        unsigned long flags;
 246
 247        BUG_ON(new_min_nr <= 0);
 248        might_sleep();
 249
 250        spin_lock_irqsave(&pool->lock, flags);
 251        if (new_min_nr <= pool->min_nr) {
 252                while (new_min_nr < pool->curr_nr) {
 253                        element = remove_element(pool, GFP_KERNEL);
 254                        spin_unlock_irqrestore(&pool->lock, flags);
 255                        pool->free(element, pool->pool_data);
 256                        spin_lock_irqsave(&pool->lock, flags);
 257                }
 258                pool->min_nr = new_min_nr;
 259                goto out_unlock;
 260        }
 261        spin_unlock_irqrestore(&pool->lock, flags);
 262
 263        /* Grow the pool */
 264        new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
 265                                     GFP_KERNEL);
 266        if (!new_elements)
 267                return -ENOMEM;
 268
 269        spin_lock_irqsave(&pool->lock, flags);
 270        if (unlikely(new_min_nr <= pool->min_nr)) {
 271                /* Raced, other resize will do our work */
 272                spin_unlock_irqrestore(&pool->lock, flags);
 273                kfree(new_elements);
 274                goto out;
 275        }
 276        memcpy(new_elements, pool->elements,
 277                        pool->curr_nr * sizeof(*new_elements));
 278        kfree(pool->elements);
 279        pool->elements = new_elements;
 280        pool->min_nr = new_min_nr;
 281
 282        while (pool->curr_nr < pool->min_nr) {
 283                spin_unlock_irqrestore(&pool->lock, flags);
 284                element = pool->alloc(GFP_KERNEL, pool->pool_data);
 285                if (!element)
 286                        goto out;
 287                spin_lock_irqsave(&pool->lock, flags);
 288                if (pool->curr_nr < pool->min_nr) {
 289                        add_element(pool, element);
 290                } else {
 291                        spin_unlock_irqrestore(&pool->lock, flags);
 292                        pool->free(element, pool->pool_data);   /* Raced */
 293                        goto out;
 294                }
 295        }
 296out_unlock:
 297        spin_unlock_irqrestore(&pool->lock, flags);
 298out:
 299        return 0;
 300}
 301EXPORT_SYMBOL(mempool_resize);
 302
 303/**
 304 * mempool_alloc - allocate an element from a specific memory pool
 305 * @pool:      pointer to the memory pool which was allocated via
 306 *             mempool_create().
 307 * @gfp_mask:  the usual allocation bitmask.
 308 *
 309 * this function only sleeps if the alloc_fn() function sleeps or
 310 * returns NULL. Note that due to preallocation, this function
 311 * *never* fails when called from process contexts. (it might
 312 * fail if called from an IRQ context.)
 313 * Note: neither __GFP_NOMEMALLOC nor __GFP_ZERO are supported.
 314 */
 315void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 316{
 317        void *element;
 318        unsigned long flags;
 319        wait_queue_t wait;
 320        gfp_t gfp_temp;
 321
 322        /* If oom killed, memory reserves are essential to prevent livelock */
 323        VM_WARN_ON_ONCE(gfp_mask & __GFP_NOMEMALLOC);
 324        /* No element size to zero on allocation */
 325        VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
 326
 327        might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
 328
 329        gfp_mask |= __GFP_NORETRY;      /* don't loop in __alloc_pages */
 330        gfp_mask |= __GFP_NOWARN;       /* failures are OK */
 331
 332        gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);
 333
 334repeat_alloc:
 335        if (likely(pool->curr_nr)) {
 336                /*
 337                 * Don't allocate from emergency reserves if there are
 338                 * elements available.  This check is racy, but it will
 339                 * be rechecked each loop.
 340                 */
 341                gfp_temp |= __GFP_NOMEMALLOC;
 342        }
 343
 344        element = pool->alloc(gfp_temp, pool->pool_data);
 345        if (likely(element != NULL))
 346                return element;
 347
 348        spin_lock_irqsave(&pool->lock, flags);
 349        if (likely(pool->curr_nr)) {
 350                element = remove_element(pool, gfp_temp);
 351                spin_unlock_irqrestore(&pool->lock, flags);
 352                /* paired with rmb in mempool_free(), read comment there */
 353                smp_wmb();
 354                /*
 355                 * Update the allocation stack trace as this is more useful
 356                 * for debugging.
 357                 */
 358                kmemleak_update_trace(element);
 359                return element;
 360        }
 361
 362        /*
 363         * We use gfp mask w/o direct reclaim or IO for the first round.  If
 364         * alloc failed with that and @pool was empty, retry immediately.
 365         */
 366        if ((gfp_temp & ~__GFP_NOMEMALLOC) != gfp_mask) {
 367                spin_unlock_irqrestore(&pool->lock, flags);
 368                gfp_temp = gfp_mask;
 369                goto repeat_alloc;
 370        }
 371        gfp_temp = gfp_mask;
 372
 373        /* We must not sleep if !__GFP_DIRECT_RECLAIM */
 374        if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
 375                spin_unlock_irqrestore(&pool->lock, flags);
 376                return NULL;
 377        }
 378
 379        /* Let's wait for someone else to return an element to @pool */
 380        init_wait(&wait);
 381        prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
 382
 383        spin_unlock_irqrestore(&pool->lock, flags);
 384
 385        /*
 386         * FIXME: this should be io_schedule().  The timeout is there as a
 387         * workaround for some DM problems in 2.6.18.
 388         */
 389        io_schedule_timeout(5*HZ);
 390
 391        finish_wait(&pool->wait, &wait);
 392        goto repeat_alloc;
 393}
 394EXPORT_SYMBOL(mempool_alloc);
 395
 396/**
 397 * mempool_free - return an element to the pool.
 398 * @element:   pool element pointer.
 399 * @pool:      pointer to the memory pool which was allocated via
 400 *             mempool_create().
 401 *
 402 * this function only sleeps if the free_fn() function sleeps.
 403 */
 404void mempool_free(void *element, mempool_t *pool)
 405{
 406        unsigned long flags;
 407
 408        if (unlikely(element == NULL))
 409                return;
 410
 411        /*
 412         * Paired with the wmb in mempool_alloc().  The preceding read is
 413         * for @element and the following @pool->curr_nr.  This ensures
 414         * that the visible value of @pool->curr_nr is from after the
 415         * allocation of @element.  This is necessary for fringe cases
 416         * where @element was passed to this task without going through
 417         * barriers.
 418         *
 419         * For example, assume @p is %NULL at the beginning and one task
 420         * performs "p = mempool_alloc(...);" while another task is doing
 421         * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
 422         * may end up using curr_nr value which is from before allocation
 423         * of @p without the following rmb.
 424         */
 425        smp_rmb();
 426
 427        /*
 428         * For correctness, we need a test which is guaranteed to trigger
 429         * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
 430         * without locking achieves that and refilling as soon as possible
 431         * is desirable.
 432         *
 433         * Because curr_nr visible here is always a value after the
 434         * allocation of @element, any task which decremented curr_nr below
 435         * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
 436         * incremented to min_nr afterwards.  If curr_nr gets incremented
 437         * to min_nr after the allocation of @element, the elements
 438         * allocated after that are subject to the same guarantee.
 439         *
 440         * Waiters happen iff curr_nr is 0 and the above guarantee also
 441         * ensures that there will be frees which return elements to the
 442         * pool waking up the waiters.
 443         */
 444        if (unlikely(pool->curr_nr < pool->min_nr)) {
 445                spin_lock_irqsave(&pool->lock, flags);
 446                if (likely(pool->curr_nr < pool->min_nr)) {
 447                        add_element(pool, element);
 448                        spin_unlock_irqrestore(&pool->lock, flags);
 449                        wake_up(&pool->wait);
 450                        return;
 451                }
 452                spin_unlock_irqrestore(&pool->lock, flags);
 453        }
 454        pool->free(element, pool->pool_data);
 455}
 456EXPORT_SYMBOL(mempool_free);
 457
 458/*
 459 * A commonly used alloc and free fn.
 460 */
 461void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
 462{
 463        struct kmem_cache *mem = pool_data;
 464        VM_BUG_ON(mem->ctor);
 465        return kmem_cache_alloc(mem, gfp_mask);
 466}
 467EXPORT_SYMBOL(mempool_alloc_slab);
 468
 469void mempool_free_slab(void *element, void *pool_data)
 470{
 471        struct kmem_cache *mem = pool_data;
 472        kmem_cache_free(mem, element);
 473}
 474EXPORT_SYMBOL(mempool_free_slab);
 475
 476/*
 477 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 478 * specified by pool_data
 479 */
 480void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
 481{
 482        size_t size = (size_t)pool_data;
 483        return kmalloc(size, gfp_mask);
 484}
 485EXPORT_SYMBOL(mempool_kmalloc);
 486
 487void mempool_kfree(void *element, void *pool_data)
 488{
 489        kfree(element);
 490}
 491EXPORT_SYMBOL(mempool_kfree);
 492
 493/*
 494 * A simple mempool-backed page allocator that allocates pages
 495 * of the order specified by pool_data.
 496 */
 497void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
 498{
 499        int order = (int)(long)pool_data;
 500        return alloc_pages(gfp_mask, order);
 501}
 502EXPORT_SYMBOL(mempool_alloc_pages);
 503
 504void mempool_free_pages(void *element, void *pool_data)
 505{
 506        int order = (int)(long)pool_data;
 507        __free_pages(element, order);
 508}
 509EXPORT_SYMBOL(mempool_free_pages);
 510