linux/arch/c6x/kernel/dma.c
/*
 *  Copyright (C) 2011 Texas Instruments Incorporated
 *  Author: Mark Salter <msalter@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

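/*
 * Perform the CPU cache maintenance required for a streaming DMA transfer.
 * DMA on C6x is not cache coherent, so buffers going to a device must be
 * written back from L2, and buffers written by a device must be invalidated
 * before the CPU reads them.
 */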
static void c6x_dma_sync(dma_addr_t handle, size_t size,
                         enum dma_data_direction dir)
{
        unsigned long paddr = handle;

        BUG_ON(!valid_dma_direction(dir));

        switch (dir) {
        case DMA_FROM_DEVICE:
                L2_cache_block_invalidate(paddr, paddr + size);
                break;
        case DMA_TO_DEVICE:
                L2_cache_block_writeback(paddr, paddr + size);
                break;
        case DMA_BIDIRECTIONAL:
                L2_cache_block_writeback_invalidate(paddr, paddr + size);
                break;
        default:
                break;
        }
}

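/*
 * Map a single page for streaming DMA.  The DMA address is simply the
 * physical address of the buffer; the CPU cache is synchronized here
 * unless the caller passed DMA_ATTR_SKIP_CPU_SYNC.
 */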
static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        dma_addr_t handle = virt_to_phys(page_address(page) + offset);

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                c6x_dma_sync(handle, size, dir);

        return handle;
}

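/*
 * Unmap a single page.  No unmapping work is needed beyond the cache
 * sync, which is skipped when DMA_ATTR_SKIP_CPU_SYNC is set.
 */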
static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                c6x_dma_sync(handle, size, dir);
}

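/*
 * Map a scatterlist for DMA: record each entry's physical address as its
 * DMA address and sync the cache for each segment unless the caller passed
 * DMA_ATTR_SKIP_CPU_SYNC.
 */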
static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
                if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                        c6x_dma_sync(sg->dma_address, sg->length, dir);
        }

        return nents;
}

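/*
 * Unmap a scatterlist, syncing the cache for each segment unless the
 * caller asked to skip CPU synchronization.
 */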
static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                  int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        for_each_sg(sglist, sg, nents, i)
                c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
}

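/*
 * The sync_single_for_{cpu,device} callbacks both reduce to the same cache
 * operation; the direction argument selects invalidate, writeback, or both.
 */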
static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        c6x_dma_sync(handle, size, dir);
}

static void c6x_dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        c6x_dma_sync(handle, size, dir);
}

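/*
 * The scatterlist sync variants apply the single-buffer sync to each
 * entry in turn.
 */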
static void c6x_dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sglist, int nents,
                enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sglist, sg, nents, i)
                c6x_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
                                        sg->length, dir);
}

static void c6x_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sglist, int nents,
                enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sglist, sg, nents, i)
                c6x_dma_sync_single_for_device(dev, sg_dma_address(sg),
                                           sg->length, dir);
}

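/*
 * dma_map_ops instance for the C6x architecture.  c6x_dma_alloc() and
 * c6x_dma_free() are provided by the arch's coherent allocation code.
 */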
const struct dma_map_ops c6x_dma_ops = {
        .alloc                  = c6x_dma_alloc,
        .free                   = c6x_dma_free,
        .map_page               = c6x_dma_map_page,
        .unmap_page             = c6x_dma_unmap_page,
        .map_sg                 = c6x_dma_map_sg,
        .unmap_sg               = c6x_dma_unmap_sg,
        .sync_single_for_device = c6x_dma_sync_single_for_device,
        .sync_single_for_cpu    = c6x_dma_sync_single_for_cpu,
        .sync_sg_for_device     = c6x_dma_sync_sg_for_device,
        .sync_sg_for_cpu        = c6x_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(c6x_dma_ops);

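/* DMA-API debug support, registered early in boot via fs_initcall(). */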
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);