linux/arch/mips/mm/dma-noncoherent.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/dma-coherence.h>
#include <asm/io.h>

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware-maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However this function is only called on non-I/O-coherent
 * systems, and only the R10000 and R12000 are used in such systems: the
 * SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
        switch (boot_cpu_type()) {
        case CPU_R10000:
        case CPU_R12000:
        case CPU_BMIPS5000:
        case CPU_LOONGSON2EF:
                return true;
        default:
                /*
                 * Presence of MAARs suggests that the CPU supports
                 * speculatively prefetching data, and therefore requires
                 * the post-DMA flush/invalidate.
                 */
                return cpu_has_maar;
        }
}

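/*
 * Prepare a page that is about to back a coherent (uncached) allocation:
 * write back and invalidate any cache lines covering it so that no dirty
 * line can later be evicted over data written by the device.
 */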
void arch_dma_prep_coherent(struct page *page, size_t size)
{
        dma_cache_wback_inv((unsigned long)page_address(page), size);
}

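/*
 * Return an uncached alias of @addr by rebasing its physical address into
 * the unmapped, uncached window at UNCAC_BASE (e.g. KSEG1 on 32-bit kernels).
 */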
void *arch_dma_set_uncached(void *addr, size_t size)
{
        return (void *)(__pa(addr) + UNCAC_BASE);
}

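/*
 * Map the DMA direction onto the required cache maintenance: write back
 * dirty lines before the device reads memory, invalidate stale lines
 * before the CPU reads memory the device has written, and do both for
 * bidirectional transfers.
 */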
static inline void dma_sync_virt(void *addr, size_t size,
                enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                dma_cache_wback((unsigned long)addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv((unsigned long)addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv((unsigned long)addr, size);
                break;

        default:
                BUG();
        }
}

/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
        unsigned long offset = paddr & ~PAGE_MASK;
        size_t left = size;

        do {
                size_t len = left;

                if (PageHighMem(page)) {
                        void *addr;

                        if (offset + len > PAGE_SIZE)
                                len = PAGE_SIZE - offset;

                        addr = kmap_atomic(page);
                        dma_sync_virt(addr + offset, len, dir);
                        kunmap_atomic(addr);
                } else
                        dma_sync_virt(page_address(page) + offset, size, dir);
                offset = 0;
                page++;
                left -= len;
        } while (left);
}

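/*
 * Called by the DMA mapping core before the device accesses the buffer:
 * perform the cache maintenance required for the given direction so the
 * device sees the CPU's latest data.
 */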
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        dma_sync_phys(paddr, size, dir);
}

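/*
 * Called after the device has finished with the buffer.  Only CPUs that
 * can speculatively refill cachelines (see cpu_needs_post_dma_flush()
 * above) need another invalidate at this point.
 */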
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        if (cpu_needs_post_dma_flush())
                dma_sync_phys(paddr, size, dir);
}
#endif

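/*
 * Backend for dma_cache_sync(): cache maintenance around CPU accesses to
 * memory that was allocated non-consistent (i.e. still cached).
 */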
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        dma_sync_virt(vaddr, size, direction);
}

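/*
 * With per-device coherence, the platform/bus code reports at setup time
 * whether each device is I/O-coherent; record that so the generic DMA code
 * can skip the cache maintenance above for coherent devices.
 */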
#ifdef CONFIG_DMA_PERDEV_COHERENT
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                const struct iommu_ops *iommu, bool coherent)
{
        dev->dma_coherent = coherent;
}
#endif