linux/arch/hexagon/kernel/dma.c
/*
 * DMA implementation for Hexagon
 *
 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/dma-direct.h>
#include <linux/bootmem.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <asm/dma-mapping.h>
#include <asm/page.h>

#define HEXAGON_MAPPING_ERROR   0

/*
 * Arch-wide DMA ops pointer, returned to drivers by get_dma_ops();
 * installed by hexagon_dma_init() at the bottom of this file.
 */
const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
{
        return phys_to_virt((unsigned long) dma_addr);
}

static struct gen_pool *coherent_pool;

/* Allocates from a pool of uncached memory that was reserved at boot time */
static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag,
                                 unsigned long attrs)
{
        void *ret;

        /*
         * Our max_low_pfn should have been backed off by 16MB in
         * mm/init.c to create DMA coherent space.  Use that as the VA
         * for the pool.
         */

        if (coherent_pool == NULL) {
                coherent_pool = gen_pool_create(PAGE_SHIFT, -1);

                if (coherent_pool == NULL)
                        panic("Can't create %s() memory pool!", __func__);
                else
                        gen_pool_add(coherent_pool,
                                (unsigned long) pfn_to_virt(max_low_pfn),
                                hexagon_coherent_pool_size, -1);
        }

        ret = (void *) gen_pool_alloc(coherent_pool, size);

        if (ret) {
                memset(ret, 0, size);
                *dma_addr = (dma_addr_t) virt_to_phys(ret);
        } else {
                *dma_addr = ~0;
        }

        return ret;
}

/*
 * Return a coherent buffer to the boot-time pool.  The memory is
 * uncached, so no cache maintenance is needed on free.
 */
static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
                                  dma_addr_t dma_addr, unsigned long attrs)
{
        gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}
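
/*
 * Example (illustrative sketch, not from this file; dev is assumed to
 * be the caller's struct device *): drivers reach the two routines
 * above through the generic DMA API:
 *
 *      dma_addr_t bus;
 *      void *vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *
 *      if (!vaddr)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, PAGE_SIZE, vaddr, bus);
 */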

/*
 * Returns 1 if @bus + @size is reachable under @hwdev's DMA mask,
 * 0 otherwise.  The overflow is logged only when the device claims a
 * mask of at least 32 bits, where failure is unexpected.
 */
static int check_addr(const char *name, struct device *hwdev,
                      dma_addr_t bus, size_t size)
{
        if (hwdev && hwdev->dma_mask && !dma_capable(hwdev, bus, size)) {
                if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
                        printk(KERN_ERR
                                "%s: overflow %Lx+%zu of device mask %Lx\n",
                                name, (long long)bus, size,
                                (long long)*hwdev->dma_mask);
                return 0;
        }
        return 1;
}

/*
 * Map a scatterlist for streaming DMA.  Bus addresses equal physical
 * addresses on Hexagon, so each entry is translated directly; the
 * cache lines covering each buffer are flushed unless the caller
 * passed DMA_ATTR_SKIP_CPU_SYNC.
 */
static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
                          int nents, enum dma_data_direction dir,
                          unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        WARN_ON(nents == 0 || sg[0].length == 0);

        for_each_sg(sg, s, nents, i) {
                s->dma_address = sg_phys(s);
                if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
                        return 0;

                s->dma_length = s->length;

                if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                        continue;

                flush_dcache_range((unsigned long) dma_addr_to_virt(s->dma_address),
                                   (unsigned long) dma_addr_to_virt(s->dma_address +
                                                                    s->length));
        }

        return nents;
}
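
/*
 * Example (illustrative sketch, not from this file): a driver maps a
 * scatterlist through the generic API, which calls hexagon_map_sg():
 *
 *      int n = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *
 *      if (n == 0)
 *              return -EIO;
 */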
 123
 124/*
 125 * address is virtual
 126 */
 127static inline void dma_sync(void *addr, size_t size,
 128                            enum dma_data_direction dir)
 129{
 130        switch (dir) {
 131        case DMA_TO_DEVICE:
 132                hexagon_clean_dcache_range((unsigned long) addr,
 133                (unsigned long) addr + size);
 134                break;
 135        case DMA_FROM_DEVICE:
 136                hexagon_inv_dcache_range((unsigned long) addr,
 137                (unsigned long) addr + size);
 138                break;
 139        case DMA_BIDIRECTIONAL:
 140                flush_dcache_range((unsigned long) addr,
 141                (unsigned long) addr + size);
 142                break;
 143        default:
 144                BUG();
 145        }
 146}

/**
 * hexagon_map_page() - maps an address for device DMA
 * @dev:        pointer to DMA device
 * @page:       pointer to page struct of DMA memory
 * @offset:     offset within page
 * @size:       size of memory to map
 * @dir:        transfer direction
 * @attrs:      DMA attributes; only DMA_ATTR_SKIP_CPU_SYNC is honored
 *
 * Called to map a memory address to a DMA address prior
 * to accesses to/from device.
 *
 * We don't particularly have many hoops to jump through
 * so far.  Straight translation between phys and virtual.
 *
 * DMA is not cache coherent so sync is necessary; this
 * seems to be a convenient place to do it.
 */
static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        dma_addr_t bus = page_to_phys(page) + offset;

        WARN_ON(size == 0);

        if (!check_addr("map_single", dev, bus, size))
                return HEXAGON_MAPPING_ERROR;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_sync(dma_addr_to_virt(bus), size, dir);

        return bus;
}
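
/*
 * Example (illustrative sketch, not from this file; buf and len are the
 * caller's): a streaming mapping arrives here via dma_map_single(), and
 * the result must be checked with dma_mapping_error():
 *
 *      dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 */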

/* Make a streaming buffer coherent for the CPU after device DMA */
static void hexagon_sync_single_for_cpu(struct device *dev,
                                        dma_addr_t dma_handle, size_t size,
                                        enum dma_data_direction dir)
{
        dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}

/* Push CPU-side writes to memory before the device accesses the buffer */
static void hexagon_sync_single_for_device(struct device *dev,
                                        dma_addr_t dma_handle, size_t size,
                                        enum dma_data_direction dir)
{
        dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}
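
/*
 * Example (illustrative sketch, not from this file): after a device has
 * written into a mapped buffer, a driver reclaims it for the CPU with:
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 */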

static int hexagon_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == HEXAGON_MAPPING_ERROR;
}

const struct dma_map_ops hexagon_dma_ops = {
        .alloc          = hexagon_dma_alloc_coherent,
        .free           = hexagon_free_coherent,
        .map_sg         = hexagon_map_sg,
        .map_page       = hexagon_map_page,
        .sync_single_for_cpu = hexagon_sync_single_for_cpu,
        .sync_single_for_device = hexagon_sync_single_for_device,
        .mapping_error  = hexagon_mapping_error,
        .is_phys        = 1,
};

/* Install the Hexagon DMA ops unless a platform already registered its own */
void __init hexagon_dma_init(void)
{
        if (dma_ops)
                return;

        dma_ops = &hexagon_dma_ops;
}