linux/arch/avr32/mm/dma-coherent.c
/*
 *  Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>

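/*
 * Bring the data cache and memory into sync for a streaming DMA
 * buffer: invalidate when the device has written to memory, clean
 * (write back) when the device is about to read it, or both.
 * Buffers in the uncached P2 segment need no maintenance.
 */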
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_region(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_region(vaddr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);

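/*
 * Example (hypothetical caller, not part of this file): clean a
 * freshly written packet buffer out to memory before the device
 * reads it. mydev_start_tx() is a made-up helper.
 *
 *	memcpy(buf, pkt_data, pkt_len);
 *	dma_cache_sync(dev, buf, pkt_len, DMA_TO_DEVICE);
 *	mydev_start_tx(dev);
 */

/*
 * Allocate pages for DMA. The page allocator only hands out
 * power-of-two sized blocks, so any pages beyond the requested size
 * are split off and returned.
 */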
static struct page *__dma_alloc(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;

	/* Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on AVR32 as it is not supported on this
	 * platform--see CONFIG_HUGETLB_PAGE. */
	gfp &= ~(__GFP_COMP);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
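	/*
	 * Turn the high-order block into independent order-0 pages so
	 * that the unused tail can be freed page by page below.
	 */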
	split_page(page, order);

	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cares, he should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	/*
	 * Free any unused pages
	 */
	while (free < end) {
		__free_page(free);
		free++;
	}

	return page;
}
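
/*
 * Worked example (not in the original): for size == 3 * PAGE_SIZE,
 * get_order() rounds up to order 2 (four pages), and the loop above
 * hands the fourth page straight back to the page allocator.
 */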

static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	while (page < end)
		__free_page(page++);
}

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	void *ret = NULL;

	page = __dma_alloc(dev, size, handle, gfp);
	if (page)
		ret = phys_to_uncached(page_to_phys(page));

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

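/*
 * Example (hypothetical caller): allocate a descriptor ring and tell
 * the device where it lives. RING_BYTES and RING_ADDR_REG are made up.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	__raw_writel(ring_dma, regs + RING_ADDR_REG);
 */
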
void dma_free_coherent(struct device *dev, size_t size,
		       void *cpu_addr, dma_addr_t handle)
{
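	/*
	 * The coherent mapping was handed out as an uncached (P2)
	 * address; translate it back through the physical address to
	 * the cached (P1) alias that virt_to_page() understands.
	 */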
	void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
	struct page *page;

	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
		 cpu_addr, (unsigned long)handle, (unsigned)size);
	BUG_ON(!virt_addr_valid(addr));
	page = virt_to_page(addr);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);

void *dma_alloc_writecombine(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	dma_addr_t phys;

	page = __dma_alloc(dev, size, handle, gfp);
	if (!page)
		return NULL;

	phys = page_to_phys(page);
	*handle = phys;

	/* Now, map the page into P3 with write-combining turned on */
	return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);

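/*
 * Example (hypothetical caller): map a frame buffer write-combined so
 * pixel writes can be gathered into bursts. FB_BYTES is made up.
 *
 *	dma_addr_t fb_dma;
 *	void *fb = dma_alloc_writecombine(dev, FB_BYTES, &fb_dma,
 *					  GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 */
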
void dma_free_writecombine(struct device *dev, size_t size,
			   void *cpu_addr, dma_addr_t handle)
{
	struct page *page;

	iounmap(cpu_addr);

	page = phys_to_page(handle);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);