linux/arch/microblaze/kernel/dma.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <asm/cacheflush.h>

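/*
 * Coherent allocations are delegated to the MicroBlaze consistent-memory
 * allocator: consistent_alloc() returns a buffer suitable for coherent DMA
 * and fills in the device-visible address, consistent_free() releases it.
 */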
static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flag,
                                      unsigned long attrs)
{
        return consistent_alloc(flag, size, dma_handle);
}

static void dma_nommu_free_coherent(struct device *dev, size_t size,
                                    void *vaddr, dma_addr_t dma_handle,
                                    unsigned long attrs)
{
        consistent_free(size, vaddr);
}

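/*
 * Bring a physical range into the state the transfer direction requires:
 * flush the data cache before the device reads from memory (DMA_TO_DEVICE,
 * DMA_BIDIRECTIONAL), invalidate it before the CPU reads data written by
 * the device (DMA_FROM_DEVICE).
 */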
static inline void __dma_sync(unsigned long paddr,
                              size_t size, enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
        case DMA_BIDIRECTIONAL:
                flush_dcache_range(paddr, paddr + size);
                break;
        case DMA_FROM_DEVICE:
                invalidate_dcache_range(paddr, paddr + size);
                break;
        default:
                BUG();
        }
}

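/*
 * Scatterlist mapping on a directly mapped bus: the DMA address of each
 * segment is simply its physical address, so only the cache maintenance in
 * __dma_sync() is needed, and even that can be skipped with
 * DMA_ATTR_SKIP_CPU_SYNC.
 */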
static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
                            int nents, enum dma_data_direction direction,
                            unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        /* FIXME: this code path is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg);

                if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                        continue;

                __dma_sync(sg_phys(sg), sg->length, direction);
        }

        return nents;
}

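/*
 * Single-page mapping: another 1:1 translation, the returned DMA address is
 * just the physical address of the page plus the offset, with the usual
 * cache maintenance unless DMA_ATTR_SKIP_CPU_SYNC is set.
 */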
static inline dma_addr_t dma_nommu_map_page(struct device *dev,
                                            struct page *page,
                                            unsigned long offset,
                                            size_t size,
                                            enum dma_data_direction direction,
                                            unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(page_to_phys(page) + offset, size, direction);
        return page_to_phys(page) + offset;
}

static inline void dma_nommu_unmap_page(struct device *dev,
                                        dma_addr_t dma_address,
                                        size_t size,
                                        enum dma_data_direction direction,
                                        unsigned long attrs)
{
        /*
         * No extra cache cleanup or address translation is needed here:
         * dma_address is already a physical address, so it can be passed
         * straight to __dma_sync() unless the caller asked to skip the
         * CPU sync.
         */
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(dma_address, size, direction);
}

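/*
 * Partial-sync helpers for streaming mappings that the CPU and the device
 * take turns accessing: invalidate when ownership returns to the CPU,
 * flush when the buffer is handed (back) to the device.
 */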
static inline void
dma_nommu_sync_single_for_cpu(struct device *dev,
                              dma_addr_t dma_handle, size_t size,
                              enum dma_data_direction direction)
{
        /*
         * Flushing (writing back) the cache is pointless when the buffer
         * is handed back to the CPU; only invalidate DMA_FROM_DEVICE
         * buffers so the CPU sees what the device wrote.
         */

        if (direction == DMA_FROM_DEVICE)
                __dma_sync(dma_handle, size, direction);
}

static inline void
dma_nommu_sync_single_for_device(struct device *dev,
                                 dma_addr_t dma_handle, size_t size,
                                 enum dma_data_direction direction)
{
        /*
         * It's pointless to invalidate the cache if the device isn't
         * supposed to write to the relevant region.
         */

        if (direction == DMA_TO_DEVICE)
                __dma_sync(dma_handle, size, direction);
}

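/*
 * Scatterlist counterparts of the single-buffer sync helpers: walk the list
 * and apply the same direction-dependent cache maintenance to each segment.
 */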
static inline void
dma_nommu_sync_sg_for_cpu(struct device *dev,
                          struct scatterlist *sgl, int nents,
                          enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME: this code path is untested */
        if (direction == DMA_FROM_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_nommu_sync_sg_for_device(struct device *dev,
                             struct scatterlist *sgl, int nents,
                             enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME: this code path is untested */
        if (direction == DMA_TO_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}

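/*
 * Map a coherent allocation into user space.  With CONFIG_MMU the pfn of
 * the consistent buffer is remapped non-cached into the caller's VMA;
 * without an MMU the operation is not supported and -ENXIO is returned.
 */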
static
int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                            void *cpu_addr, dma_addr_t handle, size_t size,
                            unsigned long attrs)
{
#ifdef CONFIG_MMU
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;

        if (off >= count || user_count > (count - off))
                return -ENXIO;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = consistent_virt_to_pfn(cpu_addr);
        return remap_pfn_range(vma, vma->vm_start, pfn + off,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
        return -ENXIO;
#endif
}

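/*
 * The callbacks above are published as a dma_map_ops instance.  Drivers do
 * not call them directly; they go through the generic DMA API, which
 * dispatches here when dma_nommu_ops is installed as the device's
 * dma_map_ops.
 */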
const struct dma_map_ops dma_nommu_ops = {
        .alloc                  = dma_nommu_alloc_coherent,
        .free                   = dma_nommu_free_coherent,
        .mmap                   = dma_nommu_mmap_coherent,
        .map_sg                 = dma_nommu_map_sg,
        .map_page               = dma_nommu_map_page,
        .unmap_page             = dma_nommu_unmap_page,
        .sync_single_for_cpu    = dma_nommu_sync_single_for_cpu,
        .sync_single_for_device = dma_nommu_sync_single_for_device,
        .sync_sg_for_cpu        = dma_nommu_sync_sg_for_cpu,
        .sync_sg_for_device     = dma_nommu_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_nommu_ops);

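/*
 * Illustrative sketch only (not part of this file): a driver sitting on top
 * of these callbacks uses the generic DMA API rather than calling the
 * dma_nommu_* functions directly.  "dev", "buf" and "len" below are
 * hypothetical.
 *
 *      static int example_tx(struct device *dev, void *buf, size_t len)
 *      {
 *              dma_addr_t handle;
 *
 *              handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *              if (dma_mapping_error(dev, handle))
 *                      return -ENOMEM;
 *
 *              ... program the hardware with "handle", wait for completion ...
 *
 *              dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *              return 0;
 *      }
 *
 * dma_map_single() lands in dma_nommu_map_page() above, and
 * dma_unmap_single() in dma_nommu_unmap_page().
 */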
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

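/*
 * Initialise DMA-API debugging with a preallocated pool of entries.  This
 * is a no-op unless CONFIG_DMA_API_DEBUG is enabled; running it from an
 * fs_initcall means it is in place before most drivers create mappings.
 */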
static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);