linux/lib/dma-direct.c
// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>
#include <linux/mem_encrypt.h>	/* for sev_active() */

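/*
 * Returned by dma_direct_map_page() when the translated address does not
 * fit the device's DMA mask; dma_direct_mapping_error() checks for it.
 */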
#define DIRECT_MAPPING_ERROR		0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
	return sev_active();
}

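/*
 * Verify that the bus address is reachable by the device.  Failures are
 * only logged for devices with at least a 32-bit DMA mask: smaller masks
 * fail for large parts of system memory as a matter of course, and the
 * resulting log spam would not be useful.
 */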
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}

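/*
 * Check whether an allocation at @phys satisfies the device's coherent
 * DMA mask.  When DMA must be unencrypted the device will be handed the
 * address without the encryption bit, so test with __phys_to_dma().
 */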
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t addr = force_dma_unencrypted() ?
		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
	return addr + size - 1 <= dev->coherent_dma_mask;
}

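/*
 * Allocate zeroed, physically contiguous memory for the direct mapping:
 * select GFP_DMA/GFP_DMA32 based on the coherent mask, prefer CMA when
 * the context allows sleeping, and retry from ZONE_DMA if the first
 * attempt does not satisfy the mask.
 */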
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;
	void *ret;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;

	/* GFP_DMA32 and GFP_DMA are no-ops without the corresponding zones: */
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		gfp |= GFP_DMA;
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

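		/*
		 * The page cannot be addressed under the coherent mask and
		 * ZONE_DMA has not been tried yet, so retry the allocation
		 * once with GFP_DMA.
		 */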
		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
		    !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;
	ret = page_address(page);
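	/*
	 * For SEV the buffer must be mapped decrypted and the device is
	 * handed the address without the encryption bit: __phys_to_dma()
	 * is the variant of phys_to_dma() that does not set that bit.
	 */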
	if (force_dma_unencrypted()) {
		set_memory_decrypted((unsigned long)ret, 1 << page_order);
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);
	return ret;
}

/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int page_order = get_order(size);

	if (force_dma_unencrypted())
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, page_order);
}

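/*
 * Mapping is trivial in the direct case: the bus address is the physical
 * address (plus any constant offset applied by phys_to_dma()); all that
 * is left to check is that the device can actually reach it.
 */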
static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}

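/*
 * Translate each scatterlist segment individually; one segment that the
 * device cannot reach fails the whole mapping, signalled by returning 0.
 */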
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

int dma_direct_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_ZONE_DMA
	if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return 0;
#else
	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask < DMA_BIT_MASK(32))
		return 0;
#endif
	return 1;
}

static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
	.is_phys		= 1,
};
EXPORT_SYMBOL(dma_direct_ops);
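
/*
 * Usage sketch (illustrative, not part of this file): an architecture
 * opts in to the direct mapping by returning this ops structure from its
 * dma-mapping hook, e.g. in its <asm/dma-mapping.h>:
 *
 *	static inline const struct dma_map_ops *
 *	get_arch_dma_ops(struct bus_type *bus)
 *	{
 *		return &dma_direct_ops;
 *	}
 */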