linux/mm/dmapool.c
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
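
/*
 * A concrete sketch of the scheme above: the free list is embedded in the
 * page itself.  The first four bytes of each free block hold the offset of
 * the next free block, a stored offset at or beyond 'allocation' marks the
 * end of the chain, and page->offset always names the first free block.
 * Allocation and free are therefore simple pops and pushes at the head of
 * that per-page chain.
 */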

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {               /* the pool */
        struct list_head page_list;
        spinlock_t lock;
        size_t size;
        struct device *dev;
        size_t allocation;
        size_t boundary;
        char name[32];
        struct list_head pools;
};

struct dma_page {               /* cacheable header for 'allocation' bytes */
        struct list_head page_list;
        void *vaddr;
        dma_addr_t dma;
        unsigned int in_use;
        unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
        unsigned temp;
        unsigned size;
        char *next;
        struct dma_page *page;
        struct dma_pool *pool;

        next = buf;
        size = PAGE_SIZE;

        temp = scnprintf(next, size, "poolinfo - 0.1\n");
        size -= temp;
        next += temp;

        mutex_lock(&pools_lock);
        list_for_each_entry(pool, &dev->dma_pools, pools) {
                unsigned pages = 0;
                unsigned blocks = 0;

                spin_lock_irq(&pool->lock);
                list_for_each_entry(page, &pool->page_list, page_list) {
                        pages++;
                        blocks += page->in_use;
                }
                spin_unlock_irq(&pool->lock);

                /* per-pool info, no real statistics yet */
                temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
                                 pool->name, blocks,
                                 pages * (pool->allocation / pool->size),
                                 pool->size, pages);
                size -= temp;
                next += temp;
        }
        mutex_unlock(&pools_lock);

        return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
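
/*
 * Reading the resulting "pools" attribute yields a header line followed by
 * one line per pool: pool name, blocks currently in use, total blocks
 * backed by allocated pages, block size in bytes, and page count.  A
 * hypothetical example (device path elided, numbers invented purely for
 * illustration):
 *
 *      $ cat /sys/devices/.../pools
 *      poolinfo - 0.1
 *      buffer-2048          3    4 2048  2
 *      buffer-512           0    8  512  1
 */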

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                                 size_t size, size_t align, size_t boundary)
{
        struct dma_pool *retval;
        size_t allocation;
        bool empty = false;

        if (align == 0)
                align = 1;
        else if (align & (align - 1))
                return NULL;

        if (size == 0)
                return NULL;
        else if (size < 4)
                size = 4;

        if ((size % align) != 0)
                size = ALIGN(size, align);

        allocation = max_t(size_t, size, PAGE_SIZE);

        if (!boundary)
                boundary = allocation;
        else if ((boundary < size) || (boundary & (boundary - 1)))
                return NULL;

        retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
        if (!retval)
                return retval;

        strlcpy(retval->name, name, sizeof(retval->name));

        retval->dev = dev;

        INIT_LIST_HEAD(&retval->page_list);
        spin_lock_init(&retval->lock);
        retval->size = size;
        retval->boundary = boundary;
        retval->allocation = allocation;

        INIT_LIST_HEAD(&retval->pools);

        /*
         * pools_lock ensures that the ->dma_pools list does not get corrupted.
         * pools_reg_lock ensures that there is not a race between
         * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
         * when the first invocation of dma_pool_create() failed on
         * device_create_file() and the second assumes that it has been done (I
         * know it is a short window).
         */
        mutex_lock(&pools_reg_lock);
        mutex_lock(&pools_lock);
        if (list_empty(&dev->dma_pools))
                empty = true;
        list_add(&retval->pools, &dev->dma_pools);
        mutex_unlock(&pools_lock);
        if (empty) {
                int err;

                err = device_create_file(dev, &dev_attr_pools);
                if (err) {
                        mutex_lock(&pools_lock);
                        list_del(&retval->pools);
                        mutex_unlock(&pools_lock);
                        mutex_unlock(&pools_reg_lock);
                        kfree(retval);
                        return NULL;
                }
        }
        mutex_unlock(&pools_reg_lock);
        return retval;
}
EXPORT_SYMBOL(dma_pool_create);
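
/*
 * A minimal usage sketch (hypothetical driver, names invented for
 * illustration): a driver typically creates its pool once, e.g. at probe
 * time, and destroys it when the device goes away:
 *
 *      struct dma_pool *pool;
 *
 *      pool = dma_pool_create("foo-buffers", &pdev->dev, 64, 8, 4096);
 *      if (!pool)
 *              return -ENOMEM;
 *      ...
 *      dma_pool_destroy(pool);         // in the remove path
 *
 * Here every block is at least 64 bytes, 8-byte aligned, and never crosses
 * a 4096-byte boundary.
 */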

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
        unsigned int offset = 0;
        unsigned int next_boundary = pool->boundary;

        do {
                unsigned int next = offset + pool->size;
                if (unlikely((next + pool->size) >= next_boundary)) {
                        next = next_boundary;
                        next_boundary += pool->boundary;
                }
                *(int *)(page->vaddr + offset) = next;
                offset = next;
        } while (offset < pool->allocation);
}
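
/*
 * Worked example of the initialisation above (numbers assumed purely for
 * illustration): with size = 384, boundary = 1024 and allocation = 4096,
 * the offsets chained through the page are
 *
 *      0 -> 384 -> 1024 -> 1408 -> 2048 -> 2432 -> 3072 -> 3456 -> 4096
 *
 * so each 1024-byte region holds two blocks, no block straddles a
 * boundary, and the final stored value (4096, the allocation size)
 * terminates the free list.
 */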

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
        struct dma_page *page;

        page = kmalloc(sizeof(*page), mem_flags);
        if (!page)
                return NULL;
        page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
                                         &page->dma, mem_flags);
        if (page->vaddr) {
#ifdef  DMAPOOL_DEBUG
                memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
                pool_initialise_page(pool, page);
                page->in_use = 0;
                page->offset = 0;
        } else {
                kfree(page);
                page = NULL;
        }
        return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
        return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
        dma_addr_t dma = page->dma;

#ifdef  DMAPOOL_DEBUG
        memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
        dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
        list_del(&page->page_list);
        kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
        bool empty = false;

        if (unlikely(!pool))
                return;

        mutex_lock(&pools_reg_lock);
        mutex_lock(&pools_lock);
        list_del(&pool->pools);
        if (pool->dev && list_empty(&pool->dev->dma_pools))
                empty = true;
        mutex_unlock(&pools_lock);
        if (empty)
                device_remove_file(pool->dev, &dev_attr_pools);
        mutex_unlock(&pools_reg_lock);

        while (!list_empty(&pool->page_list)) {
                struct dma_page *page;
                page = list_entry(pool->page_list.next,
                                  struct dma_page, page_list);
                if (is_page_busy(page)) {
                        if (pool->dev)
                                dev_err(pool->dev,
                                        "dma_pool_destroy %s, %p busy\n",
                                        pool->name, page->vaddr);
                        else
                                pr_err("dma_pool_destroy %s, %p busy\n",
                                       pool->name, page->vaddr);
                        /* leak the still-in-use consistent memory */
                        list_del(&page->page_list);
                        kfree(page);
                } else
                        pool_free_page(pool, page);
        }

        kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
                     dma_addr_t *handle)
{
        unsigned long flags;
        struct dma_page *page;
        size_t offset;
        void *retval;

        might_sleep_if(gfpflags_allow_blocking(mem_flags));

        spin_lock_irqsave(&pool->lock, flags);
        list_for_each_entry(page, &pool->page_list, page_list) {
                if (page->offset < pool->allocation)
                        goto ready;
        }

        /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
        spin_unlock_irqrestore(&pool->lock, flags);

        page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
        if (!page)
                return NULL;

        spin_lock_irqsave(&pool->lock, flags);

        list_add(&page->page_list, &pool->page_list);
 ready:
        page->in_use++;
        offset = page->offset;
        page->offset = *(int *)(page->vaddr + offset);
        retval = offset + page->vaddr;
        *handle = offset + page->dma;
#ifdef  DMAPOOL_DEBUG
        {
                int i;
                u8 *data = retval;
                /* page->offset is stored in first 4 bytes */
                for (i = sizeof(page->offset); i < pool->size; i++) {
                        if (data[i] == POOL_POISON_FREED)
                                continue;
                        if (pool->dev)
                                dev_err(pool->dev,
                                        "dma_pool_alloc %s, %p (corrupted)\n",
                                        pool->name, retval);
                        else
                                pr_err("dma_pool_alloc %s, %p (corrupted)\n",
                                        pool->name, retval);

                        /*
                         * Dump the first 4 bytes even if they are not
                         * POOL_POISON_FREED
                         */
                        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
                                        data, pool->size, 1);
                        break;
                }
        }
        if (!(mem_flags & __GFP_ZERO))
                memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
        spin_unlock_irqrestore(&pool->lock, flags);

        if (mem_flags & __GFP_ZERO)
                memset(retval, 0, pool->size);

        return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
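
/*
 * A minimal allocation sketch (hypothetical variables, for illustration
 * only):
 *
 *      dma_addr_t dma;
 *      void *cpu_addr;
 *
 *      cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *      if (!cpu_addr)
 *              return -ENOMEM;
 *      // the CPU writes through cpu_addr; the device is handed 'dma'
 *
 * Use GFP_ATOMIC instead of GFP_KERNEL when allocating from a context
 * that must not sleep.
 */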

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
        struct dma_page *page;

        list_for_each_entry(page, &pool->page_list, page_list) {
                if (dma < page->dma)
                        continue;
                if ((dma - page->dma) < pool->allocation)
                        return page;
        }
        return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
        struct dma_page *page;
        unsigned long flags;
        unsigned int offset;

        spin_lock_irqsave(&pool->lock, flags);
        page = pool_find_page(pool, dma);
        if (!page) {
                spin_unlock_irqrestore(&pool->lock, flags);
                if (pool->dev)
                        dev_err(pool->dev,
                                "dma_pool_free %s, %p/%lx (bad dma)\n",
                                pool->name, vaddr, (unsigned long)dma);
                else
                        pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
                               pool->name, vaddr, (unsigned long)dma);
                return;
        }

        offset = vaddr - page->vaddr;
#ifdef  DMAPOOL_DEBUG
        if ((dma - page->dma) != offset) {
                spin_unlock_irqrestore(&pool->lock, flags);
                if (pool->dev)
                        dev_err(pool->dev,
                                "dma_pool_free %s, %p (bad vaddr)/%pad\n",
                                pool->name, vaddr, &dma);
                else
                        pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
                               pool->name, vaddr, &dma);
                return;
        }
        {
                unsigned int chain = page->offset;
                while (chain < pool->allocation) {
                        if (chain != offset) {
                                chain = *(int *)(page->vaddr + chain);
                                continue;
                        }
                        spin_unlock_irqrestore(&pool->lock, flags);
                        if (pool->dev)
                                dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
                                        pool->name, &dma);
                        else
                                pr_err("dma_pool_free %s, dma %pad already free\n",
                                       pool->name, &dma);
                        return;
                }
        }
        memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

        page->in_use--;
        *(int *)vaddr = page->offset;
        page->offset = offset;
        /*
         * Resist a temptation to do
         *    if (!is_page_busy(page)) pool_free_page(pool, page);
         * Better have a few empty pages hang around.
         */
        spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
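
/*
 * Freeing pairs with the allocation sketch above: both the CPU and DMA
 * addresses returned by dma_pool_alloc() must be handed back, e.g.
 *
 *      dma_pool_free(pool, cpu_addr, dma);
 */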

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
        struct dma_pool *pool = *(struct dma_pool **)res;

        dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
        return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
                                  size_t size, size_t align, size_t allocation)
{
        struct dma_pool **ptr, *pool;

        ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
        if (pool)
                devres_add(dev, ptr);
        else
                devres_free(ptr);

        return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
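
/*
 * A minimal managed-usage sketch (hypothetical probe function, names
 * invented for illustration): the pool is registered as a devres resource,
 * so no explicit dma_pool_destroy() is needed on the error or remove
 * paths:
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct dma_pool *pool;
 *
 *              pool = dmam_pool_create("foo", &pdev->dev, 64, 8, 0);
 *              if (!pool)
 *                      return -ENOMEM;
 *              ...
 *              return 0;       // pool freed automatically on driver detach
 *      }
 */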

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
        struct device *dev = pool->dev;

        WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);