linux/arch/mn10300/mm/dma-alloc.c
/* MN10300 Dynamic DMA mapping support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * Derived from: arch/i386/kernel/pci-dma.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

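/* Bump allocator for the 0xbc000000-0xbe000000 PCI SRAM window: blocks are
 * carved off in 256-byte multiples and, as mn10300_dma_free() below shows,
 * are never given back.
 */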
static unsigned long pci_sram_allocated = 0xbc000000;

static void *mn10300_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
        unsigned long addr;
        void *ret;

        pr_debug("dma_alloc_coherent(%s,%zu,%x)\n",
                 dev ? dev_name(dev) : "?", size, gfp);

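        /* try the PCI SRAM window first: round the size up to a 256-byte
         * multiple and carve the block straight off the top of the window
         */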
        if (0xbe000000 - pci_sram_allocated >= size) {
                size = (size + 255) & ~255;
                addr = pci_sram_allocated;
                pci_sram_allocated += size;
                ret = (void *) addr;
                goto done;
        }

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
                gfp |= GFP_DMA;

        addr = __get_free_pages(gfp, get_order(size));
        if (!addr)
                return NULL;

        /* map the coherent memory through the uncached memory window: ORing
         * in 0x20000000 gives the uncached alias of the cached linear-map
         * address
         */
        ret = (void *) (addr | 0x20000000);

        /* fill the memory with obvious rubbish */
        memset((void *) addr, 0xfb, size);

        /* write back and evict all cache lines covering this region; use the
         * full allocation size, as it may span more than one page
         */
        mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), size);

done:
        *dma_handle = virt_to_bus((void *) addr);
        printk(KERN_DEBUG "dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle);
        return ret;
}

static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, struct dma_attrs *attrs)
{
        unsigned long addr = (unsigned long) vaddr & ~0x20000000;

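        /* blocks at or above 0x9c000000 came from the SRAM window (0xbc000000
         * with the uncached bit masked off); they are permanent and never
         * returned to the page allocator
         */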
        if (addr >= 0x9c000000)
                return;

        free_pages(addr, get_order(size));
}

static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                int nents, enum dma_data_direction direction,
                struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sglist, sg, nents, i) {
                BUG_ON(!sg_page(sg));

                sg->dma_address = sg_phys(sg);
        }

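        /* DMA is not cache-coherent here, so write back and invalidate the
         * whole data cache before the device looks at the list
         */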
        mn10300_dcache_flush_inv();
        return nents;
}

static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction direction, struct dma_attrs *attrs)
{
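        /* no IOMMU: the DMA address is just the bus view of the page's
         * physical address
         */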
        return page_to_bus(page) + offset;
}

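/* both sync-for-device hooks write back and invalidate the entire data
 * cache rather than just the region passed in
 */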
static void mn10300_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        mn10300_dcache_flush_inv();
}

static void mn10300_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sg, int nelems,
                enum dma_data_direction direction)
{
        mn10300_dcache_flush_inv();
}

static int mn10300_dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
         * guarantee allocations that must be within a tighter range than
         * GFP_DMA
         */
        if (mask < 0x00ffffff)
                return 0;
        return 1;
}

struct dma_map_ops mn10300_dma_ops = {
        .alloc                  = mn10300_dma_alloc,
        .free                   = mn10300_dma_free,
        .map_page               = mn10300_dma_map_page,
        .map_sg                 = mn10300_dma_map_sg,
        .sync_single_for_device = mn10300_dma_sync_single_for_device,
        .sync_sg_for_device     = mn10300_dma_sync_sg_for_device,
        .dma_supported          = mn10300_dma_supported,
};