linux/arch/arm/xen/mm.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/cpu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

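/*
 * Pick the GFP zone for swiotlb bounce pages: if any RAM sits below
 * the 32-bit boundary, constrain the allocation to a DMA-capable zone
 * so the bounce buffer remains addressable by 32-bit-limited devices.
 */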
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	struct memblock_region *reg;
	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;

	for_each_memblock(memory, reg) {
		if (reg->base < (phys_addr_t)0xffffffff) {
			if (IS_ENABLED(CONFIG_ZONE_DMA32))
				flags |= __GFP_DMA32;
			else
				flags |= __GFP_DMA;
			break;
		}
	}
	return __get_free_pages(flags, order);
}

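/* Set by xen_mm_init() once Xen is known to implement GNTTABOP_cache_flush. */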
static bool hypercall_cflush = false;

/* buffers in highmem or foreign pages cannot cross page boundaries */
static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
{
	struct gnttab_cache_flush cflush;

	cflush.a.dev_bus_addr = handle & XEN_PAGE_MASK;
	cflush.offset = xen_offset_in_page(handle);
	cflush.op = op;

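	/*
	 * Flush at most one Xen page per hypercall: clamp the length to
	 * the end of the current Xen page, then advance dev_bus_addr and
	 * continue from offset 0 until the whole range has been covered.
	 */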
	do {
		if (size + cflush.offset > XEN_PAGE_SIZE)
			cflush.length = XEN_PAGE_SIZE - cflush.offset;
		else
			cflush.length = size;

		HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		cflush.offset = 0;
		cflush.a.dev_bus_addr += cflush.length;
		size -= cflush.length;
	} while (size);
}

/*
 * Dom0 is mapped 1:1, and while the Linux page can span across multiple Xen
 * pages, it is not possible for it to contain a mix of local and foreign Xen
 * pages.  Calling pfn_valid on a foreign mfn will always return false, so if
 * pfn_valid returns true the page is local and we can use the native
 * dma-direct functions, otherwise we call the Xen specific version.
 */
void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
{
	if (pfn_valid(PFN_DOWN(handle)))
		arch_sync_dma_for_cpu(dev, paddr, size, dir);
	else if (dir != DMA_TO_DEVICE)
		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
}

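/*
 * For foreign pages: data headed to the device must be cleaned (written
 * back) so the device sees the CPU's writes, while a DMA_FROM_DEVICE
 * buffer only needs its stale cache lines invalidated so they cannot be
 * evicted on top of the data the device is about to write to RAM.
 */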
void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
{
	if (pfn_valid(PFN_DOWN(handle)))
		arch_sync_dma_for_device(dev, paddr, size, dir);
	else if (dir == DMA_FROM_DEVICE)
		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
	else
		dma_cache_maint(handle, size, GNTTAB_CACHE_CLEAN);
}

bool xen_arch_need_swiotlb(struct device *dev,
			   phys_addr_t phys,
			   dma_addr_t dev_addr)
{
	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
	unsigned int bfn = XEN_PFN_DOWN(dev_addr);

	/*
	 * The swiotlb buffer should be used if
	 *	- Xen doesn't have the cache flush hypercall
	 *	- The Linux page refers to foreign memory
	 *	- The device doesn't support coherent DMA requests
	 *
	 * The Linux page may span multiple Xen pages, although it is not
	 * possible for it to contain a mix of local and foreign Xen pages.
	 * Furthermore, range_straddles_page_boundary is already checking
	 * if the buffer is physically contiguous in the host RAM.
	 *
	 * Therefore we only need to check the first Xen page to know if we
	 * require a bounce buffer because the device doesn't support coherent
	 * memory and we are not able to flush the cache.
	 */
	return (!hypercall_cflush && (xen_pfn != bfn) &&
		!dev_is_dma_coherent(dev));
}

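/*
 * Dom0's 1:1 mapping means a physically contiguous buffer is already
 * contiguous in machine addresses, so no pages need to be exchanged
 * with the hypervisor: the DMA handle is simply the physical address.
 */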
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	return;
}

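/*
 * Probe for GNTTABOP_cache_flush with a zero-length, no-op flush: a
 * hypervisor that lacks the hypercall returns -ENOSYS, any other result
 * means cache flush requests can be handed to Xen.
 */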
int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;

	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);