linux/arch/arc/mm/dma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - but still handles both coherent and non-coherent requests from the caller
 *
 * For DMA coherent hardware (IOC) the generic code suffices
 */
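
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * consumers never call the arch_* hooks directly; they go through the
 * generic DMA API, e.g. dma_alloc_coherent(), which routes here when the
 * device is non-coherent. The device pointer and size are hypothetical;
 * <linux/dma-mapping.h> is pulled in via dma-noncoherent.h.
 */
static void __maybe_unused example_coherent_buffer(struct device *dev)
{
        dma_addr_t handle;
        void *buf;

        /* routes to arch_dma_alloc() below when 'dev' is non-coherent */
        buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
        if (!buf)
                return;

        /* ... program the device with 'handle', access 'buf' from the CPU ... */

        dma_free_coherent(dev, PAGE_SIZE, buf, handle); /* -> arch_dma_free() */
}
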
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
{
        unsigned long order = get_order(size);
        struct page *page;
        phys_addr_t paddr;
        void *kvaddr;
        bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);

        /*
         * The __GFP_HIGHMEM flag is cleared by upper layer functions
         * (in include/linux/dma-mapping.h), so we should never see
         * __GFP_HIGHMEM here.
         */
        BUG_ON(gfp & __GFP_HIGHMEM);

        page = alloc_pages(gfp | __GFP_ZERO, order);
        if (!page)
                return NULL;

        /* This is the linear address (0x8000_0000 based) */
        paddr = page_to_phys(page);

        *dma_handle = paddr;

        /*
         * A coherent buffer needs an MMU mapping to enforce non-cacheability.
         * kvaddr is the kernel virtual address (0x7000_0000 based).
         */
        if (need_coh) {
                kvaddr = ioremap_nocache(paddr, size);
                if (kvaddr == NULL) {
                        __free_pages(page, order);
                        return NULL;
                }
        } else {
                kvaddr = (void *)(u32)paddr;
        }

        /*
         * Evict any existing L1 and/or L2 lines for the backing page in
         * case it was used earlier as a normal "cached" page.
         * Yeah this bit us - STAR 9000898266
         *
         * Although the core does call flush_cache_vmap(), it gets kvaddr,
         * hence can't be used to efficiently flush L1 and/or L2, which need
         * paddr. Currently flush_cache_vmap() nukes the L1 cache completely,
         * which will be optimized in a separate commit.
         */
        if (need_coh)
                dma_cache_wback_inv(paddr, size);

        return kvaddr;
}
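
/*
 * Illustrative sketch (editorial example): DMA_ATTR_NON_CONSISTENT takes the
 * cached "else" branch above, so the caller owns coherence and must bracket
 * CPU accesses with explicit syncs. Names and sizes are hypothetical.
 */
static void __maybe_unused example_nonconsistent_buffer(struct device *dev)
{
        dma_addr_t handle;
        void *buf;

        buf = dma_alloc_attrs(dev, PAGE_SIZE, &handle, GFP_KERNEL,
                              DMA_ATTR_NON_CONSISTENT);
        if (!buf)
                return;

        /* CPU writes land in the cache; push them to memory for the device */
        memset(buf, 0, PAGE_SIZE);
        dma_sync_single_for_device(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);

        dma_free_attrs(dev, PAGE_SIZE, buf, handle, DMA_ATTR_NON_CONSISTENT);
}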

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        phys_addr_t paddr = dma_handle;
        struct page *page = virt_to_page(paddr);

        if (!(attrs & DMA_ATTR_NON_CONSISTENT))
                iounmap((void __force __iomem *)vaddr);

        __free_pages(page, get_order(size));
}

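/*
 * Used by the generic DMA mmap()/get_sgtable() code to translate a coherent
 * buffer's DMA address back to a pfn; DMA addresses are 1:1 with physical
 * addresses here, so the translation is trivial.
 */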
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
                dma_addr_t dma_addr)
{
        return __phys_to_pfn(dma_addr);
}

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of the direction argument, as that is
 * done in the upper layer functions (in include/linux/dma-mapping.h)
 */

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                dma_cache_wback(paddr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(paddr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(paddr, size);
                break;

        default:
                break;
        }
}
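
/*
 * Illustrative sketch (editorial example): the for_device/"map" column of
 * the table above, as driven by a hypothetical device-bound (TX) buffer.
 */
static void __maybe_unused example_tx_streaming(struct device *dev, void *data,
                                                size_t len)
{
        dma_addr_t handle;

        /* map == for_device, TO_DEV row: dma_cache_wback(paddr, len) */
        handle = dma_map_single(dev, data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return;

        /* ... hand 'handle' to the device and wait for TX completion ... */

        /* unmap == for_cpu, TO_DEV row: no cache operation needed */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}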

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                break;

        /* FROM_DEVICE invalidate needed only for CPU speculative prefetches */
        case DMA_FROM_DEVICE:
        case DMA_BIDIRECTIONAL:
                dma_cache_inv(paddr, size);
                break;

        default:
                break;
        }
}
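
/*
 * Illustrative sketch (editorial example): the FROM_DEV rows of the table.
 * The second invalidate (at unmap) discards lines the CPU may have
 * speculatively prefetched while the device was still writing.
 */
static void __maybe_unused example_rx_streaming(struct device *dev, void *data,
                                                size_t len)
{
        dma_addr_t handle;

        /* map == for_device: invalidate so no dirty line can evict over
         * the device's incoming data */
        handle = dma_map_single(dev, data, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, handle))
                return;

        /* ... device DMAs into the buffer ... */

        /* unmap == for_cpu: invalidate again; 'data' is then safe to read */
        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}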

/*
 * Plug in direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
{
        /*
         * IOC hardware snoops all DMA traffic keeping the caches consistent
         * with memory - eliding the need for any explicit cache maintenance
         * of DMA buffers.
         */
        if (is_isa_arcv2() && ioc_enable && coherent)
                dev->dma_coherent = true;

        dev_info(dev, "use %scoherent DMA ops\n",
                 dev->dma_coherent ? "" : "non");
}
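
/*
 * Illustrative sketch (editorial example): 'coherent' above ultimately
 * reflects the "dma-coherent" DT property; a bus layer wires it up along
 * these lines (of_dma_configure() does the real work, and of_dma_is_coherent()
 * needs <linux/of_address.h>). dma_base/size are unused by this
 * implementation, so zeros are passed for brevity.
 */
static void __maybe_unused example_dt_wireup(struct device *dev,
                                             struct device_node *np)
{
        bool coherent = of_dma_is_coherent(np); /* "dma-coherent" present? */

        arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
}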