linux/arch/arm/common/dmabounce.c
/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 *  DMA windows, will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
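
/*
 * Illustrative sketch (not from the original source): on a hypothetical
 * platform with 64 MB of RAM where a bus master can only address the
 * bottom 16 MB, the layout this code assumes would look roughly like:
 *
 *	0x00000000 - 0x00ffffff : ZONE_DMA    - bounce buffers are
 *	                                        allocated from here
 *	0x01000000 - 0x03ffffff : normal RAM  - mappings that land here
 *	                                        must be bounced
 *
 * The platform's dma_needs_bounce() implementation (used by map_single()
 * below) would return true for any address in the second range.  The
 * addresses and sizes above are assumptions for illustration only.
 */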

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
        struct list_head node;

        /* original request */
        void            *ptr;
        size_t          size;
        int             direction;

        /* safe buffer info */
        struct dmabounce_pool *pool;
        void            *safe;
        dma_addr_t      safe_dma_addr;
};

struct dmabounce_pool {
        unsigned long   size;
        struct dma_pool *pool;
#ifdef STATS
        unsigned long   allocs;
#endif
};

struct dmabounce_device_info {
        struct device *dev;
        struct list_head safe_buffers;
#ifdef STATS
        unsigned long total_allocs;
        unsigned long map_op_count;
        unsigned long bounce_count;
        int attr_res;
#endif
        struct dmabounce_pool   small;
        struct dmabounce_pool   large;

        rwlock_t lock;
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
                device_info->small.allocs,
                device_info->large.allocs,
                device_info->total_allocs - device_info->small.allocs -
                        device_info->large.allocs,
                device_info->total_allocs,
                device_info->map_op_count,
                device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
                  size_t size, enum dma_data_direction dir)
{
        struct safe_buffer *buf;
        struct dmabounce_pool *pool;
        struct device *dev = device_info->dev;
        unsigned long flags;

        dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
                __func__, ptr, size, dir);

        if (size <= device_info->small.size) {
                pool = &device_info->small;
        } else if (size <= device_info->large.size) {
                pool = &device_info->large;
        } else {
                pool = NULL;
        }

        buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
        if (buf == NULL) {
                dev_warn(dev, "%s: kmalloc failed\n", __func__);
                return NULL;
        }

        buf->ptr = ptr;
        buf->size = size;
        buf->direction = dir;
        buf->pool = pool;

        if (pool) {
                buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
                                           &buf->safe_dma_addr);
        } else {
                buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
                                               GFP_ATOMIC);
        }

        if (buf->safe == NULL) {
                dev_warn(dev,
                         "%s: could not alloc dma memory (size=%d)\n",
                         __func__, size);
                kfree(buf);
                return NULL;
        }

#ifdef STATS
        if (pool)
                pool->allocs++;
        device_info->total_allocs++;
#endif

        write_lock_irqsave(&device_info->lock, flags);

        list_add(&buf->node, &device_info->safe_buffers);

        write_unlock_irqrestore(&device_info->lock, flags);

        return buf;
}
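
/*
 * Worked example of the pool selection above (illustrative only, using
 * sizes a caller might pass to dmabounce_register_dev() below): with
 * small.size == 512 and large.size == 4096, a 64-byte mapping is served
 * from the small dma_pool, a 2000-byte mapping from the large dma_pool,
 * and an 8192-byte mapping falls through to dma_alloc_coherent(), since
 * no pool is big enough.  The specific sizes are assumptions, not values
 * mandated by this file.
 */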

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
        struct safe_buffer *b, *rb = NULL;
        unsigned long flags;

        read_lock_irqsave(&device_info->lock, flags);

        list_for_each_entry(b, &device_info->safe_buffers, node)
                if (b->safe_dma_addr == safe_dma_addr) {
                        rb = b;
                        break;
                }

        read_unlock_irqrestore(&device_info->lock, flags);
        return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
        unsigned long flags;

        dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

        write_lock_irqsave(&device_info->lock, flags);

        list_del(&buf->node);

        write_unlock_irqrestore(&device_info->lock, flags);

        if (buf->pool)
                dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
        else
                dma_free_coherent(device_info->dev, buf->size, buf->safe,
                                    buf->safe_dma_addr);

        kfree(buf);
}

/* ************************************************** */

static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        dma_addr_t dma_addr;
        int needs_bounce = 0;

        if (device_info)
                DO_STATS ( device_info->map_op_count++ );

        dma_addr = virt_to_dma(dev, ptr);

        if (dev->dma_mask) {
                unsigned long mask = *dev->dma_mask;
                unsigned long limit;

                limit = (mask + 1) & ~mask;
                if (limit && size > limit) {
                        dev_err(dev, "DMA mapping too big (requested %#x "
                                "mask %#Lx)\n", size, *dev->dma_mask);
                        return ~0;
                }

                /*
                 * Figure out if we need to bounce from the DMA mask.
                 */
                needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
        }

        if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
                struct safe_buffer *buf;

                buf = alloc_safe_buffer(device_info, ptr, size, dir);
                if (buf == NULL) {
                        dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                               __func__, ptr);
                        return ~0;
                }

                dev_dbg(dev,
                        "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
                        __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
                        buf->safe, (void *) buf->safe_dma_addr);

                if ((dir == DMA_TO_DEVICE) ||
                    (dir == DMA_BIDIRECTIONAL)) {
                        dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
                                __func__, ptr, buf->safe, size);
                        memcpy(buf->safe, ptr, size);
                }
                ptr = buf->safe;

                dma_addr = buf->safe_dma_addr;
        } else {
                /*
                 * The buffer is not being bounced, so it is not backed
                 * by memory from the coherent allocators; make sure the
                 * CPU caches are maintained for the DMA transfer.
                 */
                dma_cache_maint(ptr, size, dir);
        }

        return dma_addr;
}
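
/*
 * Worked example of the checks above (values are illustrative, not taken
 * from any particular platform): with *dev->dma_mask == 0x00ffffff (a
 * 16 MB DMA window starting at bus address 0),
 *
 *	limit = (0x00ffffff + 1) & ~0x00ffffff = 0x01000000
 *
 * so any single mapping larger than 16 MB is rejected outright.  A
 * buffer at dma_addr 0x01800000 of size 0x1000 then gives
 *
 *	(0x01800000 | 0x01800fff) & ~0x00ffffff = 0x01000000 != 0
 *
 * so needs_bounce is set and the data goes through a safe buffer.  With
 * a full 32-bit mask (0xffffffff), mask + 1 overflows to 0, limit
 * becomes 0, and neither the size check nor the bounce test triggers.
 */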

static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        struct safe_buffer *buf = NULL;

        /*
         * Trying to unmap an invalid mapping
         */
        if (dma_mapping_error(dma_addr)) {
                dev_err(dev, "Trying to unmap invalid mapping\n");
                return;
        }

        if (device_info)
                buf = find_safe_buffer(device_info, dma_addr);

        if (buf) {
                BUG_ON(buf->size != size);

                dev_dbg(dev,
                        "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
                        __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
                        buf->safe, (void *) buf->safe_dma_addr);

                DO_STATS ( device_info->bounce_count++ );

                if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                        void *ptr = buf->ptr;

                        dev_dbg(dev,
                                "%s: copy back safe %p to unsafe %p size %d\n",
                                __func__, buf->safe, ptr, size);
                        memcpy(ptr, buf->safe, size);

                        /*
                         * DMA buffers must have the same cache properties
                         * as if they were really used for DMA - which means
                         * data must be written back to RAM.  Note that
                         * we don't use dmac_flush_range() here for the
                         * bidirectional case because we know the cache
                         * lines will be coherent with the data written.
                         */
                        dmac_clean_range(ptr, ptr + size);
                        outer_clean_range(__pa(ptr), __pa(ptr) + size);
                }
                free_safe_buffer(device_info, buf);
        }
}

static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        struct safe_buffer *buf = NULL;

        if (device_info)
                buf = find_safe_buffer(device_info, dma_addr);

        if (buf) {
                /*
                 * Both of these checks from original code need to be
                 * commented out b/c some drivers rely on the following:
                 *
                 * 1) Drivers may map a large chunk of memory into DMA space
                 *    but only sync a small portion of it. Good example is
                 *    allocating a large buffer, mapping it, and then
                 *    breaking it up into small descriptors. No point
                 *    in syncing the whole buffer if you only have to
                 *    touch one descriptor.
                 *
                 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
                 *    usually only synced in one dir at a time.
                 *
                 * See drivers/net/eepro100.c for examples of both cases.
                 *
                 * -ds
                 *
                 * BUG_ON(buf->size != size);
                 * BUG_ON(buf->direction != dir);
                 */

                dev_dbg(dev,
                        "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
                        __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
                        buf->safe, (void *) buf->safe_dma_addr);

                DO_STATS ( device_info->bounce_count++ );

                switch (dir) {
                case DMA_FROM_DEVICE:
                        dev_dbg(dev,
                                "%s: copy back safe %p to unsafe %p size %d\n",
                                __func__, buf->safe, buf->ptr, size);
                        memcpy(buf->ptr, buf->safe, size);
                        break;
                case DMA_TO_DEVICE:
                        dev_dbg(dev,
                                "%s: copy out unsafe %p to safe %p, size %d\n",
                                __func__, buf->ptr, buf->safe, size);
                        memcpy(buf->safe, buf->ptr, size);
                        break;
                case DMA_BIDIRECTIONAL:
                        BUG();  /* is this allowed?  what does it mean? */
                default:
                        BUG();
                }
                /*
                 * No need to sync the safe buffer - it was allocated
                 * via the coherent allocators.
                 */
        } else {
                dma_cache_maint(dma_to_virt(dev, dma_addr), size, dir);
        }
}
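
/*
 * Illustrative driver pattern for case 2 above (hypothetical code, not
 * from any particular driver): a buffer mapped DMA_BIDIRECTIONAL is
 * typically handed back and forth like this, syncing only one direction
 * per transfer:
 *
 *	dma = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
 *
 *	dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);
 *	// ... device reads the buffer ...
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... CPU reads what the device wrote back ...
 *
 *	dma_unmap_single(dev, dma, len, DMA_BIDIRECTIONAL);
 *
 * This is why sync_single() must tolerate dir differing from
 * buf->direction.
 */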

/* ************************************************** */

/*
 * See if a buffer address is in an 'unsafe' range.  If it is, allocate
 * a 'safe' buffer and copy the unsafe buffer into it, substituting the
 * safe buffer for the unsafe one (basically, move the buffer from an
 * unsafe area to a safe one).
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
{
        dma_addr_t dma_addr;

        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, ptr, size, dir);

        BUG_ON(dir == DMA_NONE);

        dma_addr = map_single(dev, ptr, size, dir);

        return dma_addr;
}

/*
 * See if a mapped address was really a "safe" buffer and, if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer (basically return things back to the way they
 * should be).
 */
void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                        enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);

        BUG_ON(dir == DMA_NONE);

        unmap_single(dev, dma_addr, size, dir);
}

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
                __func__, sg, nents, dir);

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                struct page *page = sg_page(sg);
                unsigned int offset = sg->offset;
                unsigned int length = sg->length;
                void *ptr = page_address(page) + offset;

                sg->dma_address =
                        map_single(dev, ptr, length, dir);
        }

        return nents;
}
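
/*
 * Illustrative caller sketch (hypothetical driver code, not part of this
 * file): a scatterlist is typically prepared and mapped like this, with
 * each entry's dma_address filled in by dma_map_sg() above:
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], first_buf, first_len);
 *	sg_set_buf(&sg[1], second_buf, second_len);
 *
 *	if (dma_map_sg(dev, sg, 2, DMA_TO_DEVICE) == 0)
 *		goto map_failed;
 *	// ... program the device using sg_dma_address()/sg_dma_len() ...
 *	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 *
 * first_buf/first_len and the error label are placeholders for whatever
 * the calling driver actually uses.
 */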

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
                __func__, sg, nents, dir);

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;

                unmap_single(dev, dma_addr, length, dir);
        }
}

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
                                enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);

        sync_single(dev, dma_addr, size, dir);
}

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
                                enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);

        sync_single(dev, dma_addr, size, dir);
}

void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
                        enum dma_data_direction dir)
{
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
                __func__, sg, nents, dir);

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;

                sync_single(dev, dma_addr, length, dir);
        }
}

void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
                        enum dma_data_direction dir)
{
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
                __func__, sg, nents, dir);

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;

                sync_single(dev, dma_addr, length, dir);
        }
}

static int
dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
                    unsigned long size)
{
        pool->size = size;
        DO_STATS(pool->allocs = 0);
        pool->pool = dma_pool_create(name, dev, size,
                                     0 /* byte alignment */,
                                     0 /* no page-crossing issues */);

        return pool->pool ? 0 : -ENOMEM;
}

int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
                        unsigned long large_buffer_size)
{
        struct dmabounce_device_info *device_info;
        int ret;

        device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
        if (!device_info) {
                printk(KERN_ERR
                        "Could not allocate dmabounce_device_info for %s\n",
                        dev->bus_id);
                return -ENOMEM;
        }

        ret = dmabounce_init_pool(&device_info->small, dev,
                                  "small_dmabounce_pool", small_buffer_size);
        if (ret) {
                dev_err(dev,
                        "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                        small_buffer_size);
                goto err_free;
        }

        if (large_buffer_size) {
                ret = dmabounce_init_pool(&device_info->large, dev,
                                          "large_dmabounce_pool",
                                          large_buffer_size);
                if (ret) {
                        dev_err(dev,
                                "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                                large_buffer_size);
                        goto err_destroy;
                }
        }

        device_info->dev = dev;
        INIT_LIST_HEAD(&device_info->safe_buffers);
        rwlock_init(&device_info->lock);

#ifdef STATS
        device_info->total_allocs = 0;
        device_info->map_op_count = 0;
        device_info->bounce_count = 0;
        device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

        dev->archdata.dmabounce = device_info;

        printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
                dev->bus_id, dev->bus->name);

        return 0;

 err_destroy:
        dma_pool_destroy(device_info->small.pool);
 err_free:
        kfree(device_info);
        return ret;
}

void
dmabounce_unregister_dev(struct device *dev)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

        dev->archdata.dmabounce = NULL;

        if (!device_info) {
                printk(KERN_WARNING
                        "%s: Never registered with dmabounce but attempting "
                        "to unregister!\n", dev->bus_id);
                return;
        }

        if (!list_empty(&device_info->safe_buffers)) {
                printk(KERN_ERR
                        "%s: Removing from dmabounce with pending buffers!\n",
                        dev->bus_id);
                BUG();
        }

        if (device_info->small.pool)
                dma_pool_destroy(device_info->small.pool);
        if (device_info->large.pool)
                dma_pool_destroy(device_info->large.pool);

#ifdef STATS
        if (device_info->attr_res == 0)
                device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

        kfree(device_info);

        printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
                dev->bus_id, dev->bus->name);
}
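
/*
 * Illustrative registration sketch (hypothetical bus code, not part of
 * this file): a platform that needs bouncing for devices behind, say, a
 * custom bridge would typically wire dmabounce up when each device is
 * added and tear it down on removal.  The function names and the pool
 * sizes below (512-byte and 4 KiB) are assumptions for illustration only.
 *
 *	static int my_bus_add_dev(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = dmabounce_register_dev(dev, 512, 4096);
 *		if (ret)
 *			dev_err(dev, "could not register with dmabounce\n");
 *		return ret;
 *	}
 *
 *	static void my_bus_del_dev(struct device *dev)
 *	{
 *		dmabounce_unregister_dev(dev);
 *	}
 */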


EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_single_for_cpu);
EXPORT_SYMBOL(dma_sync_single_for_device);
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
EXPORT_SYMBOL(dma_sync_sg_for_device);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");