linux/arch/avr32/mm/dma-coherent.c
/*
 *  Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>
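
/*
 * Synchronize the data cache for a cached virtual region used for
 * streaming DMA.  Uncached (P2) addresses need no maintenance and are
 * skipped.  For cached addresses: DMA_FROM_DEVICE invalidates,
 * DMA_TO_DEVICE writes back, and DMA_BIDIRECTIONAL does both.
 */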
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_region(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_region(vaddr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);
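
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * that has filled a cached buffer for the device to read would write
 * the dirty lines back before starting the transfer:
 *
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 */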
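
/*
 * Allocate page-aligned memory for DMA.  The compound-page flag is
 * masked out and the allocation is split into individual pages so that
 * the unused tail of the 2^order block can be returned to the page
 * allocator.  The cached alias of the new memory is invalidated so
 * that stale cache lines cannot shadow it later.  Returns the first
 * page and stores the bus address in *handle.
 */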
static struct page *__dma_alloc(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;

	/* Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on AVR32 as it is not supported on this
	 * platform--see CONFIG_HUGETLB_PAGE. */
	gfp &= ~(__GFP_COMP);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);

	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cared, it should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	/*
	 * Free any unused pages
	 */
	while (free < end) {
		__free_page(free);
		free++;
	}

	return page;
}
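
/*
 * Return the individual pages of a __dma_alloc() allocation to the
 * page allocator.  Only the pages covered by the page-aligned size are
 * freed here; the unused tail was already released at allocation time.
 */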
static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	while (page < end)
		__free_page(page++);
}
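
/*
 * Allocate coherent DMA memory.  The backing pages come from
 * __dma_alloc(); the returned CPU pointer is the uncached (P2) alias
 * of the allocation, so no explicit cache maintenance is needed while
 * the CPU and the device share the buffer.
 */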
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	void *ret = NULL;

	page = __dma_alloc(dev, size, handle, gfp);
	if (page)
		ret = phys_to_uncached(page_to_phys(page));

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
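
/*
 * Example (hypothetical caller, for illustration only): allocate a
 * buffer the CPU and device can share, then release it when done:
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, dma);
 */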
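
/*
 * Free memory obtained from dma_alloc_coherent().  The uncached (P2)
 * pointer handed out by the allocator is converted back to its cached
 * (P1) alias before the underlying pages are looked up and freed.
 */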
void dma_free_coherent(struct device *dev, size_t size,
		       void *cpu_addr, dma_addr_t handle)
{
	void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
	struct page *page;

	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
		 cpu_addr, (unsigned long)handle, (unsigned)size);
	BUG_ON(!virt_addr_valid(addr));
	page = virt_to_page(addr);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);
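
/*
 * Allocate DMA memory with a write-combining CPU mapping.  Unlike
 * dma_alloc_coherent(), the pages are mapped into P3 via __ioremap()
 * with _PAGE_BUFFER set rather than accessed through the uncached P2
 * segment.  Note that *handle, set to the bus address by __dma_alloc(),
 * is overwritten here with the physical address of the allocation.
 */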
void *dma_alloc_writecombine(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	dma_addr_t phys;

	page = __dma_alloc(dev, size, handle, gfp);
	if (!page)
		return NULL;

	phys = page_to_phys(page);
	*handle = phys;

	/* Now, map the page into P3 with write-combining turned on */
	return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);
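
/*
 * Free memory obtained from dma_alloc_writecombine(): tear down the
 * write-combining mapping with iounmap(), then free the pages found
 * via the physical address in @handle.
 */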
void dma_free_writecombine(struct device *dev, size_t size,
			   void *cpu_addr, dma_addr_t handle)
{
	struct page *page;

	iounmap(cpu_addr);

	page = phys_to_page(handle);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);