/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
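
/*
 * Illustrative usage (a sketch, not taken from any particular platform):
 * bus or platform code registers each device that sits behind a limited
 * DMA window and provides a dma_needs_bounce() helper telling this code
 * which addresses the device cannot reach.  The names foo_add_device()
 * and FOO_DMA_LIMIT below are hypothetical.
 *
 *	static int foo_add_device(struct device *dev)
 *	{
 *		return dmabounce_register_dev(dev, 512, 4096);
 *	}
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > FOO_DMA_LIMIT;
 *	}
 *
 * On device removal the platform calls dmabounce_unregister_dev(dev).
 */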

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
        struct list_head node;

        /* original request */
        void            *ptr;
        size_t          size;
        int             direction;

        /* safe buffer info */
        struct dmabounce_pool *pool;
        void            *safe;
        dma_addr_t      safe_dma_addr;
};

struct dmabounce_pool {
        unsigned long   size;
        struct dma_pool *pool;
#ifdef STATS
        unsigned long   allocs;
#endif
};

struct dmabounce_device_info {
        struct device *dev;
        struct list_head safe_buffers;
#ifdef STATS
        unsigned long total_allocs;
        unsigned long map_op_count;
        unsigned long bounce_count;
        int attr_res;
#endif
        struct dmabounce_pool   small;
        struct dmabounce_pool   large;

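        /* protects the safe_buffers list; always taken with IRQs disabled */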
        rwlock_t lock;
};

#ifdef STATS
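/*
 * Report the bounce statistics via the "dmabounce_stats" sysfs attribute
 * as six space-separated counters: small-pool allocs, large-pool allocs,
 * coherent (oversize) allocs, total allocs, map operations and bounce
 * copies.
 */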
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
                device_info->small.allocs,
                device_info->large.allocs,
                device_info->total_allocs - device_info->small.allocs -
                        device_info->large.allocs,
                device_info->total_allocs,
                device_info->map_op_count,
                device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
                  size_t size, enum dma_data_direction dir)
{
        struct safe_buffer *buf;
        struct dmabounce_pool *pool;
        struct device *dev = device_info->dev;
        unsigned long flags;

        dev_dbg(dev, "%s(ptr=%p, size=%zu, dir=%d)\n",
                __func__, ptr, size, dir);

        if (size <= device_info->small.size) {
                pool = &device_info->small;
        } else if (size <= device_info->large.size) {
                pool = &device_info->large;
        } else {
                pool = NULL;
        }

        buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
        if (buf == NULL) {
                dev_warn(dev, "%s: kmalloc failed\n", __func__);
                return NULL;
        }

        buf->ptr = ptr;
        buf->size = size;
        buf->direction = dir;
        buf->pool = pool;

        if (pool) {
                buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
                                           &buf->safe_dma_addr);
        } else {
                buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
                                               GFP_ATOMIC);
        }

        if (buf->safe == NULL) {
                dev_warn(dev,
                         "%s: could not alloc dma memory (size=%zu)\n",
                         __func__, size);
                kfree(buf);
                return NULL;
        }

#ifdef STATS
        if (pool)
                pool->allocs++;
        device_info->total_allocs++;
#endif

        write_lock_irqsave(&device_info->lock, flags);
        list_add(&buf->node, &device_info->safe_buffers);
        write_unlock_irqrestore(&device_info->lock, flags);

        return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
        struct safe_buffer *b, *rb = NULL;
        unsigned long flags;

        read_lock_irqsave(&device_info->lock, flags);

        list_for_each_entry(b, &device_info->safe_buffers, node)
                if (b->safe_dma_addr == safe_dma_addr) {
                        rb = b;
                        break;
                }

        read_unlock_irqrestore(&device_info->lock, flags);
        return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
        unsigned long flags;

        dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

        write_lock_irqsave(&device_info->lock, flags);

        list_del(&buf->node);

        write_unlock_irqrestore(&device_info->lock, flags);

        if (buf->pool)
                dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
        else
                dma_free_coherent(device_info->dev, buf->size, buf->safe,
                                    buf->safe_dma_addr);

        kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
                dma_addr_t dma_addr, const char *where)
{
        if (!dev || !dev->archdata.dmabounce)
                return NULL;
        if (dma_mapping_error(dev, dma_addr)) {
                if (dev)
                        dev_err(dev, "Trying to %s invalid mapping\n", where);
                else
                        pr_err("unknown device: Trying to %s invalid mapping\n", where);
                return NULL;
        }
        return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        dma_addr_t dma_addr;
        int needs_bounce = 0;

        if (device_info)
                DO_STATS(device_info->map_op_count++);

        dma_addr = virt_to_dma(dev, ptr);

        if (dev->dma_mask) {
                unsigned long mask = *dev->dma_mask;
                unsigned long limit;

                limit = (mask + 1) & ~mask;
                if (limit && size > limit) {
                        dev_err(dev, "DMA mapping too big (requested %#zx "
                                "mask %#Lx)\n", size, *dev->dma_mask);
                        return ~0;
                }

                /*
                 * Figure out if we need to bounce from the DMA mask.
                 */
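                /*
                 * needs_bounce is non-zero when any part of the buffer
                 * falls outside the device's DMA mask: e.g. with a 24-bit
                 * mask (0x00ffffff), a 4 KiB buffer at 0x00fff800 ends at
                 * 0x010007ff and must be bounced even though its start
                 * address is reachable.
                 */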
                needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
        }

        if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
                struct safe_buffer *buf;

                buf = alloc_safe_buffer(device_info, ptr, size, dir);
                if (buf == NULL) {
                        dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                               __func__, ptr);
                        return 0;
                }

                dev_dbg(dev,
                        "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                        __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                        buf->safe, buf->safe_dma_addr);

                if ((dir == DMA_TO_DEVICE) ||
                    (dir == DMA_BIDIRECTIONAL)) {
                        dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %zu\n",
                                __func__, ptr, buf->safe, size);
                        memcpy(buf->safe, ptr, size);
                }
                ptr = buf->safe;

                dma_addr = buf->safe_dma_addr;
        } else {
                /*
                 * No bouncing needed; just perform the usual cache
                 * maintenance.  (The bounce path above skips this because
                 * the safe buffer was allocated via the coherent
                 * allocators.)
                 */
                dma_cache_maint(ptr, size, dir);
        }

        return dma_addr;
}

static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction dir)
{
        struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

        if (buf) {
                BUG_ON(buf->size != size);
                BUG_ON(buf->direction != dir);

                dev_dbg(dev,
                        "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                        __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                        buf->safe, buf->safe_dma_addr);

                DO_STATS(dev->archdata.dmabounce->bounce_count++);

                if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                        void *ptr = buf->ptr;

                        dev_dbg(dev,
                                "%s: copy back safe %p to unsafe %p size %zu\n",
                                __func__, buf->safe, ptr, size);
                        memcpy(ptr, buf->safe, size);

                        /*
                         * DMA buffers must have the same cache properties
                         * as if they were really used for DMA - which means
                         * data must be written back to RAM.  Note that
                         * we don't use dmac_flush_range() here for the
                         * bidirectional case because we know the cache
                         * lines will be coherent with the data written.
                         */
                        dmac_clean_range(ptr, ptr + size);
                        outer_clean_range(__pa(ptr), __pa(ptr) + size);
                }
                free_safe_buffer(dev->archdata.dmabounce, buf);
        }
}

/* ************************************************** */

/*
 * See if a buffer address is in an 'unsafe' range.  If it is,
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * Substitute the safe buffer for the unsafe one.
 * (Basically move the buffer from an unsafe area to a safe one.)
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
                __func__, ptr, size, dir);

        BUG_ON(!valid_dma_direction(dir));

        return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
                __func__, page, offset, size, dir);

        BUG_ON(!valid_dma_direction(dir));

        if (PageHighMem(page)) {
                dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
                             "is not supported\n");
                return ~0;
        }

        return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);

/*
 * See if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (Basically return things back to the way they
 * should be.)
 */

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);

        unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_single);

int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
                unsigned long off, size_t sz, enum dma_data_direction dir)
{
        struct safe_buffer *buf;

        dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
                __func__, addr, off, sz, dir);

        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
                return 1;

        BUG_ON(buf->direction != dir);

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                buf->safe, buf->safe_dma_addr);

        DO_STATS(dev->archdata.dmabounce->bounce_count++);

        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %zu\n",
                        __func__, buf->safe + off, buf->ptr + off, sz);
                memcpy(buf->ptr + off, buf->safe + off, sz);
        }
        return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);

int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
                unsigned long off, size_t sz, enum dma_data_direction dir)
{
        struct safe_buffer *buf;

        dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
                __func__, addr, off, sz, dir);

        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
                return 1;

        BUG_ON(buf->direction != dir);

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                buf->safe, buf->safe_dma_addr);

        DO_STATS(dev->archdata.dmabounce->bounce_count++);

        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
                dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %zu\n",
                        __func__, buf->ptr + off, buf->safe + off, sz);
                memcpy(buf->safe + off, buf->ptr + off, sz);
        }
        return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
                const char *name, unsigned long size)
{
        pool->size = size;
        DO_STATS(pool->allocs = 0);
        pool->pool = dma_pool_create(name, dev, size,
                                     0 /* byte alignment */,
                                     0 /* no page-crossing issues */);

        return pool->pool ? 0 : -ENOMEM;
}

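/*
 * Register a device with dmabounce.  Two dma_pools are created: one for
 * requests of up to small_buffer_size bytes and, if large_buffer_size is
 * non-zero, a second one for requests of up to large_buffer_size bytes;
 * anything bigger falls back to dma_alloc_coherent().  Intended to be
 * called from bus or platform code when the device is added.  Returns 0
 * on success or a negative errno.
 */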
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
                unsigned long large_buffer_size)
{
        struct dmabounce_device_info *device_info;
        int ret;

        device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
        if (!device_info) {
                dev_err(dev,
                        "Could not allocate dmabounce_device_info\n");
                return -ENOMEM;
        }

        ret = dmabounce_init_pool(&device_info->small, dev,
                                  "small_dmabounce_pool", small_buffer_size);
        if (ret) {
                dev_err(dev,
                        "dmabounce: could not allocate DMA pool for %lu byte objects\n",
                        small_buffer_size);
                goto err_free;
        }

        if (large_buffer_size) {
                ret = dmabounce_init_pool(&device_info->large, dev,
                                          "large_dmabounce_pool",
                                          large_buffer_size);
                if (ret) {
                        dev_err(dev,
                                "dmabounce: could not allocate DMA pool for %lu byte objects\n",
                                large_buffer_size);
                        goto err_destroy;
                }
        }

        device_info->dev = dev;
        INIT_LIST_HEAD(&device_info->safe_buffers);
        rwlock_init(&device_info->lock);

#ifdef STATS
        device_info->total_allocs = 0;
        device_info->map_op_count = 0;
        device_info->bounce_count = 0;
        device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

        dev->archdata.dmabounce = device_info;

        dev_info(dev, "dmabounce: registered device\n");

        return 0;

 err_destroy:
        dma_pool_destroy(device_info->small.pool);
 err_free:
        kfree(device_info);
        return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

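/*
 * Tear down the dmabounce state set up by dmabounce_register_dev().  All
 * outstanding mappings must have been unmapped first: unregistering while
 * safe buffers are still on the list is treated as a BUG().
 */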
void dmabounce_unregister_dev(struct device *dev)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

        dev->archdata.dmabounce = NULL;

        if (!device_info) {
                dev_warn(dev,
                         "Never registered with dmabounce but attempting "
                         "to unregister!\n");
                return;
        }

        if (!list_empty(&device_info->safe_buffers)) {
                dev_err(dev,
                        "Removing from dmabounce with pending buffers!\n");
                BUG();
        }

        if (device_info->small.pool)
                dma_pool_destroy(device_info->small.pool);
        if (device_info->large.pool)
                dma_pool_destroy(device_info->large.pool);

#ifdef STATS
        if (device_info->attr_res == 0)
                device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

        kfree(device_info);

        dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");