linux/arch/c6x/mm/dma-coherent.c
/*
 *  Port on Texas Instruments TMS320C6x architecture
 *
 *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
 *  Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 *
 *  Using code pulled from ARM
 *  Copyright (C) 2000-2004 Russell King
 *
 */
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dma-noncoherent.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/setup.h>

/*
 * DMA coherent memory management; the region used can be redefined with
 * the memdma= kernel command-line option.
 */
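/*
 * The option is parsed by the platform's early setup code; a typical
 * invocation (illustrative only, check that code for the exact syntax)
 * reserving 8 MiB at a fixed physical address would look like:
 *
 *	memdma=8M@0xc2000000
 */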

/* none by default */
static phys_addr_t dma_base;
static u32 dma_size;
static u32 dma_pages;

static unsigned long *dma_bitmap;

/* bitmap lock */
static DEFINE_SPINLOCK(dma_lock);

/*
 * Return a DMA coherent and contiguous memory chunk from the DMA memory
 * region, or 0 when no suitably sized chunk is available.
 */
static inline u32 __alloc_dma_pages(int order)
{
	unsigned long flags;
	int pos;

	spin_lock_irqsave(&dma_lock, flags);
	pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);
	spin_unlock_irqrestore(&dma_lock, flags);

	/* bitmap_find_free_region() returns a negative errno on failure */
	if (pos < 0)
		return 0;

	return dma_base + ((u32)pos << PAGE_SHIFT);
}

static void __free_dma_pages(u32 addr, int order)
{
	unsigned long flags;
	u32 pos = (addr - dma_base) >> PAGE_SHIFT;

	/* a region may end exactly at dma_pages, hence '>' rather than '>=' */
	if (addr < dma_base || (pos + (1 << order)) > dma_pages) {
		printk(KERN_ERR "%s: freeing outside range.\n", __func__);
		BUG();
	}

	spin_lock_irqsave(&dma_lock, flags);
	bitmap_release_region(dma_bitmap, pos, order);
	spin_unlock_irqrestore(&dma_lock, flags);
}
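
/*
 * Together these two helpers form a minimal power-of-two allocator over
 * the reserved region: bitmap_find_free_region() locates 2^order
 * consecutive clear bits aligned to a 2^order boundary, marks them used
 * and returns the index of the first one, and bitmap_release_region()
 * clears them again on free.
 */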

/*
 * Allocate DMA coherent memory space and return both the kernel
 * virtual and DMA address for that space.
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;
	u32 paddr;
	int order;

	if (!dma_size || !size)
		return NULL;

	/* round the request up to a whole power-of-two number of pages */
	order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);

	paddr = __alloc_dma_pages(order);

	if (handle)
		*handle = paddr;

	if (!paddr)
		return NULL;

	/* zero the whole allocation, not just the first 2^order bytes */
	ret = phys_to_virt(paddr);
	memset(ret, 0, PAGE_SIZE << order);
	return ret;
}
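
/*
 * Worked example of the size-to-order computation above, assuming 4 KiB
 * pages: a request for size = 10000 bytes gives ((10000 - 1) >> 12) + 1
 * = 3 pages, and get_count_order(3) = 2, so a 2^2 = 4 page (16 KiB)
 * region is reserved in the bitmap and its physical address is returned
 * as the DMA handle.
 */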

/*
 * Free DMA coherent memory as defined by the above mapping.
 */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	int order;

	if (!dma_size || !size)
		return;

	order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);

	__free_dma_pages(virt_to_phys(vaddr), order);
}

/*
 * Initialise the coherent DMA memory allocator using the given uncached region.
 */
void __init coherent_mem_init(phys_addr_t start, u32 size)
{
	phys_addr_t bitmap_phys;

	if (!size)
		return;

	printk(KERN_INFO
	       "Coherent memory (DMA) region start=0x%x size=0x%x\n",
	       start, size);

	dma_base = start;
	dma_size = size;

	/* allocate the allocation bitmap, one bit per page */
	dma_pages = dma_size >> PAGE_SHIFT;
	if (dma_size & (PAGE_SIZE - 1))
		++dma_pages;

	bitmap_phys = memblock_phys_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
					  sizeof(long));

	dma_bitmap = phys_to_virt(bitmap_phys);
	/* clear only the bitmap itself, not dma_pages * PAGE_SIZE bytes */
	memset(dma_bitmap, 0, BITS_TO_LONGS(dma_pages) * sizeof(long));
}
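
/*
 * A minimal usage sketch, assuming the platform setup code has already
 * reserved an uncached region of RAM for DMA (the variable names below
 * are illustrative, not the actual call site; on c6x the caller is the
 * early boot code that parses the memdma= option):
 *
 *	phys_addr_t dma_start = uncached_region_base;
 *	u32 dma_len = uncached_region_size;
 *
 *	coherent_mem_init(dma_start, dma_len);
 */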

static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	switch (dir) {
	case DMA_FROM_DEVICE:
		L2_cache_block_invalidate(paddr, paddr + size);
		break;
	case DMA_TO_DEVICE:
		L2_cache_block_writeback(paddr, paddr + size);
		break;
	case DMA_BIDIRECTIONAL:
		L2_cache_block_writeback_invalidate(paddr, paddr + size);
		break;
	default:
		break;
	}
}
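
/*
 * The direction-to-maintenance mapping above follows the usual
 * noncoherent-DMA convention: before the device writes to memory
 * (DMA_FROM_DEVICE), stale cache lines are invalidated so the CPU
 * re-reads from RAM afterwards; before the device reads from memory
 * (DMA_TO_DEVICE), dirty lines are written back so RAM holds the CPU's
 * data; DMA_BIDIRECTIONAL does both.
 */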

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	c6x_dma_sync(dev, paddr, size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	c6x_dma_sync(dev, paddr, size, dir);
}
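
/*
 * These hooks are called from the generic dma-noncoherent mapping code:
 * for example, a driver's dma_map_single(dev, buf, len, DMA_TO_DEVICE)
 * ends up in arch_sync_dma_for_device(), and the matching
 * dma_unmap_single() in arch_sync_dma_for_cpu(), so the L2 maintenance
 * above brackets each streaming DMA transfer.
 */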