/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

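/*
 * Hand the DMA-API debugging code (CONFIG_DMA_API_DEBUG) a preallocated
 * pool of tracking entries so it does not have to allocate while
 * mappings are being created and torn down. fs_initcall() runs this
 * early, before most drivers start issuing DMA operations.
 */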
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_init);

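/*
 * Allocate a coherent buffer: grab zeroed pages from the page
 * allocator, write back and invalidate any cached lines covering them,
 * then hand the caller an uncached ioremap() alias of the same
 * physical memory. *dma_handle receives the physical address for the
 * device's use.
 */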
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 struct dma_attrs *attrs)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	gfp |= __GFP_ZERO;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

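	/*
	 * Break the order-N allocation into individual order-0 pages so
	 * that dma_generic_free_coherent() can hand them back to the
	 * page allocator one at a time with __free_pages(page, 0).
	 */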
	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

	*dma_handle = virt_to_phys(ret);

	return ret_nocache;
}

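/*
 * Tear down what dma_generic_alloc_coherent() built up: the allocation
 * was split into order-0 pages, so each page is released individually,
 * and the uncached ioremap() alias handed out as the CPU address is
 * unmapped again.
 */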
void dma_generic_free_coherent(struct device *dev, size_t size,
			       void *vaddr, dma_addr_t dma_handle,
			       struct dma_attrs *attrs)
{
	int order = get_order(size);
	unsigned long pfn = dma_handle >> PAGE_SHIFT;
	int k;

	for (k = 0; k < (1 << order); k++)
		__free_pages(pfn_to_page(pfn + k), 0);

	iounmap(vaddr);
}

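/*
 * Minimal usage sketch (illustrative, not part of this file): drivers
 * normally reach the two helpers above through the dma_map_ops
 * dispatch in dma_alloc_coherent()/dma_free_coherent() rather than by
 * calling them directly. Error handling trimmed:
 *
 *	dma_addr_t handle;
 *	void *vaddr;
 *
 *	vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (vaddr) {
 *		... program the device with "handle" ...
 *		dma_free_coherent(dev, PAGE_SIZE, vaddr, handle);
 *	}
 */
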
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	void *addr;

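	/*
	 * In 29-bit (legacy) mode the coherent buffer is accessed
	 * through the uncached P2 segment, while the cache operations
	 * below want the cached (P1) alias of the same memory;
	 * CAC_ADDR() performs that conversion. In 32-bit mode the
	 * address is usable as-is.
	 */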
	addr = __in_29bit_mode() ?
	       (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);

static int __init memchunk_setup(char *str)
{
	return 1; /* accept anything that begins with "memchunk." */
}
__setup("memchunk.", memchunk_setup);

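/*
 * Scan the kernel command line for "memchunk.<name>=<size>" and, when
 * a matching entry is found, override the platform-provided chunk
 * size. For instance, a board that registers a "vpu" chunk can have it
 * resized at boot with "memchunk.vpu=16m", or skipped entirely with
 * "memchunk.vpu=0".
 */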
static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
{
	char *p = boot_command_line;
	int k = strlen(name);

	while ((p = strstr(p, "memchunk."))) {
		p += 9; /* strlen("memchunk.") */
		if (!strncmp(name, p, k) && p[k] == '=') {
			p += k + 1;
			*sizep = memparse(p, NULL);
			pr_info("%s: forcing memory chunk size to 0x%08lx\n",
				name, *sizep);
			break;
		}
	}
}

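/*
 * Carve out a DMA-coherent buffer for a platform device and publish it
 * through the device's last resource slot, which must still be empty.
 * The requested size may be overridden (or forced to zero, which skips
 * the allocation) from the command line via memchunk_cmdline_override().
 */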
int __init platform_resource_setup_memory(struct platform_device *pdev,
					  char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			name);
		return -EINVAL;
	}

	memchunk_cmdline_override(name, &memsize);
	if (!memsize)
		return 0;

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}