linux/arch/frv/mb93090-mb00/pci-dma.c
/* pci-dma.c: Dynamic DMA mapping support for the FRV CPUs that have MMUs
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

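/* Allocate a coherent DMA buffer via the consistent-memory allocator and
 * zero it before handing it back.
 */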
static void *frv_dma_alloc(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	ret = consistent_alloc(gfp, size, dma_handle);
	if (ret)
		memset(ret, 0, size);

	return ret;
}

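/* Release a buffer previously obtained from frv_dma_alloc() */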
static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	consistent_free(vaddr);
}

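/* Map a scatterlist for DMA.  There is no hardware coherency on this
 * platform, so each page is written back out of the dcache before the
 * device is allowed to see it.
 */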
static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	struct scatterlist *sg;
	unsigned long dampr2;
	void *vaddr;
	int i;

	BUG_ON(direction == DMA_NONE);

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return nents;

	/* preserve whatever mapping currently occupies DAMPR2; the atomic
	 * kmaps below build their temporary windows in that register */
	dampr2 = __get_DAMPR(2);

	for_each_sg(sglist, sg, nents, i) {
		vaddr = kmap_atomic_primary(sg_page(sg));

		frv_dcache_writeback((unsigned long) vaddr,
				     (unsigned long) vaddr + PAGE_SIZE);
	}

	/* each kmap overwrote the previous window, so unmapping the last
	 * one is sufficient */
	kunmap_atomic_primary(vaddr);
	if (dampr2) {
		__set_DAMPR(2, dampr2);
		__set_IAMPR(2, dampr2);
	}

	return nents;
}

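/* Map a single page for DMA: flush it from the dcache, then return its
 * physical address, which the device uses directly.
 */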
static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		flush_dcache_page(page);

	return (dma_addr_t) page_to_phys(page) + offset;
}

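/* Syncing a mapping towards the device only requires the CPU's write
 * buffers to be drained on this platform.
 */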
static void frv_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	flush_write_buffers();
}

static void frv_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	flush_write_buffers();
}

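/* Report whether a device's DMA mask can be honoured.  0x00ffffff is a
 * 24-bit mask, i.e. the 16MiB reach traditionally covered by GFP_DMA.
 */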
static int frv_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;
	return 1;
}

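/* The operations table through which the generic DMA API reaches this
 * platform's implementations */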
const struct dma_map_ops frv_dma_ops = {
	.alloc			= frv_dma_alloc,
	.free			= frv_dma_free,
	.map_page		= frv_dma_map_page,
	.map_sg			= frv_dma_map_sg,
	.sync_single_for_device = frv_dma_sync_single_for_device,
	.sync_sg_for_device	= frv_dma_sync_sg_for_device,
	.dma_supported		= frv_dma_supported,
};
EXPORT_SYMBOL(frv_dma_ops);
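
/*
 * Usage sketch: drivers never call frv_dma_ops directly; they go through
 * the generic DMA API, which dispatches here.  "mydev" is a hypothetical
 * device pointer used purely for illustration.
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(&mydev->dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (buf)
 *		dma_free_coherent(&mydev->dev, PAGE_SIZE, buf, handle);
 */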