linux/arch/hexagon/kernel/dma.c
/*
 * DMA implementation for Hexagon
 *
 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <asm/dma-mapping.h>
#include <asm/page.h>

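/*
 * Sentinel bus address returned by hexagon_map_page() when a mapping
 * fails check_addr(); tested by hexagon_mapping_error().
 */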
#define HEXAGON_MAPPING_ERROR   0

const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

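/*
 * On Hexagon, bus/DMA addresses are physical addresses; recover the
 * kernel virtual address that maps a given DMA address.
 */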
static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
{
        return phys_to_virt((unsigned long) dma_addr);
}

static struct gen_pool *coherent_pool;

/* Allocates from a pool of uncached memory that was reserved at boot time */

static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag,
                                 unsigned long attrs)
{
        void *ret;

        /*
         * Our max_low_pfn should have been backed off by 16MB in
         * mm/init.c to create DMA coherent space.  Use that as the VA
         * for the pool.
         */

        if (coherent_pool == NULL) {
                /* Minimum allocation order PAGE_SHIFT, any NUMA node */
                coherent_pool = gen_pool_create(PAGE_SHIFT, -1);

                if (coherent_pool == NULL)
                        panic("Can't create %s() memory pool!", __func__);
                else
                        gen_pool_add(coherent_pool,
                                (unsigned long) pfn_to_virt(max_low_pfn),
                                hexagon_coherent_pool_size, -1);
        }

        ret = (void *) gen_pool_alloc(coherent_pool, size);

        if (ret) {
                memset(ret, 0, size);
                *dma_addr = (dma_addr_t) virt_to_phys(ret);
        } else {
                *dma_addr = ~0;
        }

        return ret;
}

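/* Return a coherent buffer to the boot-time uncached pool. */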
static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
                                  dma_addr_t dma_addr, unsigned long attrs)
{
        gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}

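/*
 * Return 1 if @bus through @bus + @size is reachable under @hwdev's
 * DMA mask; otherwise log the overflow (for masks of at least 32 bits)
 * and return 0.
 */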
static int check_addr(const char *name, struct device *hwdev,
                      dma_addr_t bus, size_t size)
{
        if (hwdev && hwdev->dma_mask && !dma_capable(hwdev, bus, size)) {
                if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
                        printk(KERN_ERR
                                "%s: overflow %Lx+%zu of device mask %Lx\n",
                                name, (long long)bus, size,
                                (long long)*hwdev->dma_mask);
                return 0;
        }
        return 1;
}

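/*
 * Map a scatterlist for DMA: each segment's bus address is simply its
 * physical address.  Caches are flushed per segment unless the caller
 * passes DMA_ATTR_SKIP_CPU_SYNC.  Returns 0 if any segment fails
 * check_addr(), else @nents.
 */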
static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
                          int nents, enum dma_data_direction dir,
                          unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        WARN_ON(nents == 0 || sg[0].length == 0);

        for_each_sg(sg, s, nents, i) {
                s->dma_address = sg_phys(s);
                if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
                        return 0;

                s->dma_length = s->length;

                if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                        continue;

                flush_dcache_range(
                        (unsigned long) dma_addr_to_virt(s->dma_address),
                        (unsigned long) dma_addr_to_virt(s->dma_address +
                                                         s->length));
        }

        return nents;
}

/*
 * Sync the CPU cache for a buffer that is about to be, or has just
 * been, involved in DMA.  @addr is a kernel virtual address.
 */
static inline void dma_sync(void *addr, size_t size,
                            enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                /* Device will read: write back dirty cache lines. */
                hexagon_clean_dcache_range((unsigned long) addr,
                                           (unsigned long) addr + size);
                break;
        case DMA_FROM_DEVICE:
                /* Device will write: discard stale cache lines. */
                hexagon_inv_dcache_range((unsigned long) addr,
                                         (unsigned long) addr + size);
                break;
        case DMA_BIDIRECTIONAL:
                /* Both directions: write back, then invalidate. */
                flush_dcache_range((unsigned long) addr,
                                   (unsigned long) addr + size);
                break;
        default:
                BUG();
        }
}

/**
 * hexagon_map_page() - maps an address for device DMA
 * @dev:        pointer to DMA device
 * @page:       pointer to page struct of DMA memory
 * @offset:     offset within page
 * @size:       size of memory to map
 * @dir:        transfer direction
 * @attrs:      DMA attributes; only DMA_ATTR_SKIP_CPU_SYNC is honored
 *
 * Called to map a memory address to a DMA address prior
 * to accesses to/from device.
 *
 * There are few hoops to jump through so far: the bus address is
 * simply the physical address of the memory being mapped.
 *
 * DMA is not cache coherent, so a cache sync is necessary; this is a
 * convenient place to do it.
 */
static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        dma_addr_t bus = page_to_phys(page) + offset;

        WARN_ON(size == 0);

        if (!check_addr("map_single", dev, bus, size))
                return HEXAGON_MAPPING_ERROR;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_sync(dma_addr_to_virt(bus), size, dir);

        return bus;
}

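/* Sync the cache so the CPU sees data the device may have written. */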
static void hexagon_sync_single_for_cpu(struct device *dev,
                                        dma_addr_t dma_handle, size_t size,
                                        enum dma_data_direction dir)
{
        dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}

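/* Sync the cache so the device sees data the CPU may have written. */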
static void hexagon_sync_single_for_device(struct device *dev,
                                        dma_addr_t dma_handle, size_t size,
                                        enum dma_data_direction dir)
{
        dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}

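/* Report whether a DMA address is the hexagon_map_page() error sentinel. */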
static int hexagon_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == HEXAGON_MAPPING_ERROR;
}

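/*
 * The DMA operations installed as the system-wide default by
 * hexagon_dma_init() below.
 */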
const struct dma_map_ops hexagon_dma_ops = {
        .alloc          = hexagon_dma_alloc_coherent,
        .free           = hexagon_free_coherent,
        .map_sg         = hexagon_map_sg,
        .map_page       = hexagon_map_page,
        .sync_single_for_cpu = hexagon_sync_single_for_cpu,
        .sync_single_for_device = hexagon_sync_single_for_device,
        .mapping_error  = hexagon_mapping_error,
        .is_phys        = 1,
};

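/*
 * Hypothetical driver-side usage (illustration only; "my_dev", "buf"
 * and "len" are made-up names).  Drivers never call hexagon_dma_ops
 * directly; the generic DMA API dispatches to it via get_dma_ops():
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(my_dev, handle))
 *              return -ENOMEM;
 *      ... start the device transfer using "handle" ...
 *      dma_unmap_single(my_dev, handle, len, DMA_TO_DEVICE);
 */
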
/*
 * Install hexagon_dma_ops as the system-wide default, unless a
 * platform has already registered its own dma_ops.
 */
void __init hexagon_dma_init(void)
{
        if (dma_ops)
                return;

        dma_ops = &hexagon_dma_ops;
}