linux/arch/hexagon/kernel/dma.c
/*
 * DMA implementation for Hexagon
 *
 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <asm/dma-mapping.h>
#include <asm/page.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

int bad_dma_address;	/* globals are automatically initialized to zero */

static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
{
	return phys_to_virt((unsigned long) dma_addr);
}

/* Only a full 32-bit DMA mask is supported on Hexagon */
int dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(32);
}
EXPORT_SYMBOL(dma_supported);
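
/*
 * Illustrative sketch, not part of the original file: a driver would
 * typically negotiate its DMA mask before mapping anything ("mydev" is
 * a hypothetical struct device pointer):
 *
 *	if (dma_set_mask(mydev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * dma_set_mask() ends up consulting dma_supported() above, so any
 * narrower mask is refused.
 */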

static struct gen_pool *coherent_pool;

/* Allocates from a pool of uncached memory that was reserved at boot time */
static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag,
					struct dma_attrs *attrs)
{
	void *ret;

	/*
	 * Our max_low_pfn should have been backed off by 16MB in
	 * mm/init.c to create DMA coherent space.  Use that as the VA
	 * for the pool.
	 */

	if (coherent_pool == NULL) {
		coherent_pool = gen_pool_create(PAGE_SHIFT, -1);

		if (coherent_pool == NULL)
			panic("Can't create %s() memory pool!", __func__);
		else
			gen_pool_add(coherent_pool,
				(unsigned long) pfn_to_virt(max_low_pfn),
				hexagon_coherent_pool_size, -1);
	}

	ret = (void *) gen_pool_alloc(coherent_pool, size);

	if (ret) {
		memset(ret, 0, size);
		*dma_addr = (dma_addr_t) virt_to_phys(ret);
	} else {
		*dma_addr = ~0;
	}

	return ret;
}
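
/*
 * Illustrative sketch, not part of the original file: how a driver
 * reaches the allocator above through the generic API ("mydev",
 * "vaddr" and "handle" are hypothetical names):
 *
 *	dma_addr_t handle;
 *	void *vaddr = dma_alloc_coherent(mydev, PAGE_SIZE, &handle,
 *					 GFP_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 *
 * The buffer comes from the uncached boot-time pool, so it needs no
 * explicit cache maintenance while mapped.
 */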

static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}

static int check_addr(const char *name, struct device *hwdev,
		      dma_addr_t bus, size_t size)
{
	if (hwdev && hwdev->dma_mask && !dma_capable(hwdev, bus, size)) {
		if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
			printk(KERN_ERR
				"%s: overflow %Lx+%zu of device mask %Lx\n",
				name, (long long)bus, size,
				(long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}

static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
			  int nents, enum dma_data_direction dir,
			  struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;

		s->dma_length = s->length;

		/* Push each entry out of the data cache before device access */
		flush_dcache_range(
			(unsigned long) dma_addr_to_virt(s->dma_address),
			(unsigned long) dma_addr_to_virt(s->dma_address +
							 s->length));
	}

	return nents;
}
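
/*
 * Illustrative sketch, not part of the original file: the generic
 * scatter-gather entry point that lands in hexagon_map_sg() ("mydev",
 * "sgl" and "nents" are hypothetical names):
 *
 *	int mapped = dma_map_sg(mydev, sgl, nents, DMA_TO_DEVICE);
 *	if (mapped == 0)
 *		return -EIO;
 *
 * Every entry is flushed from the data cache first, since Hexagon DMA
 * is not cache coherent.
 */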

/*
 * The address passed in here is a kernel virtual address.
 */
static inline void dma_sync(void *addr, size_t size,
			    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		/* Device will read: write dirty lines back to memory */
		hexagon_clean_dcache_range((unsigned long) addr,
					   (unsigned long) addr + size);
		break;
	case DMA_FROM_DEVICE:
		/* Device will write: discard stale lines from the cache */
		hexagon_inv_dcache_range((unsigned long) addr,
					 (unsigned long) addr + size);
		break;
	case DMA_BIDIRECTIONAL:
		/* Both directions: clean and invalidate */
		flush_dcache_range((unsigned long) addr,
				   (unsigned long) addr + size);
		break;
	default:
		BUG();
	}
}

/**
 * hexagon_map_page() - maps an address for device DMA
 * @dev:	pointer to DMA device
 * @page:	pointer to page struct of DMA memory
 * @offset:	offset within page
 * @size:	size of memory to map
 * @dir:	transfer direction
 * @attrs:	pointer to DMA attrs (not used)
 *
 * Called to map a memory address to a DMA address prior
 * to accesses to/from device.
 *
 * We don't particularly have many hoops to jump through
 * so far.  Straight translation between phys and virtual.
 *
 * DMA is not cache coherent so sync is necessary; this
 * seems to be a convenient place to do it.
 */
static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	dma_addr_t bus = page_to_phys(page) + offset;

	WARN_ON(size == 0);

	if (!check_addr("map_single", dev, bus, size))
		return bad_dma_address;

	dma_sync(dma_addr_to_virt(bus), size, dir);

	return bus;
}
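
/*
 * Illustrative sketch, not part of the original file: the streaming-map
 * path as a driver sees it ("mydev" and "buf" are hypothetical names):
 *
 *	dma_addr_t bus = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(mydev, bus))
 *		return -EIO;
 *
 * On failure hexagon_map_page() returns bad_dma_address (zero), which
 * is what dma_mapping_error() is assumed to check against here.
 */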

/* Give a streaming buffer back to the CPU after the device is done with it */
static void hexagon_sync_single_for_cpu(struct device *dev,
					dma_addr_t dma_handle, size_t size,
					enum dma_data_direction dir)
{
	dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}

/* Hand a streaming buffer to the device after the CPU has touched it */
static void hexagon_sync_single_for_device(struct device *dev,
					dma_addr_t dma_handle, size_t size,
					enum dma_data_direction dir)
{
	dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}
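
/*
 * Illustrative sketch, not part of the original file: ping-ponging
 * ownership of a streaming buffer between CPU and device ("mydev" and
 * "bus" are hypothetical names):
 *
 *	dma_sync_single_for_device(mydev, bus, len, DMA_TO_DEVICE);
 *	... device consumes the buffer ...
 *	dma_sync_single_for_cpu(mydev, bus, len, DMA_FROM_DEVICE);
 *
 * No unmap callbacks are provided below; the generic layer is expected
 * to skip them when NULL, and unmapping needs no extra cache work here.
 */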

struct dma_map_ops hexagon_dma_ops = {
	.alloc		= hexagon_dma_alloc_coherent,
	.free		= hexagon_free_coherent,
	.map_sg		= hexagon_map_sg,
	.map_page	= hexagon_map_page,
	.sync_single_for_cpu = hexagon_sync_single_for_cpu,
	.sync_single_for_device = hexagon_sync_single_for_device,
	.is_phys	= 1,
};

void __init hexagon_dma_init(void)
{
	/* Leave any ops a platform may have installed earlier in place */
	if (dma_ops)
		return;

	dma_ops = &hexagon_dma_ops;
}