linux/include/xen/arm/page-coherent.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>

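/*
 * Return the DMA ops to use for pages that are local to this domain:
 * prefer the per-device ops stashed in archdata (the native ops the arch
 * code saves there when the Xen DMA ops are installed for the device),
 * falling back to the default arch DMA ops otherwise.
 */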
static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dev_dma_ops)
		return dev->archdata.dev_dma_ops;
	return get_arch_dma_ops(NULL);
}

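/*
 * Xen-specific map/unmap/sync helpers, used when the page is foreign to
 * this domain (e.g. grant-mapped from another guest) and the native DMA
 * ops cannot be used.  Implemented by the Xen ARM DMA code.
 */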
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, unsigned long attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

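/*
 * Thin wrappers that forward coherent page allocation and free to the
 * device's native DMA ops.
 */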
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}

static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
	xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}

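/*
 * Map a page for DMA.  Local pages are handed to the native DMA ops,
 * foreign (grant-mapped) pages go through the Xen-specific helper; the
 * comment below explains how the two cases are told apart.
 */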
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
	unsigned long compound_pages =
		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
	bool local = (page_pfn <= dev_pfn) &&
		(dev_pfn - page_pfn < compound_pages);

	/*
	 * Dom0 is mapped 1:1.  A Linux page may span multiple Xen pages,
	 * but it cannot contain a mix of local and foreign Xen pages, so
	 * if the first xen_pfn == mfn the page is local; otherwise it is
	 * a foreign page grant-mapped in dom0.  If the page is local we
	 * can safely call the native dma_ops function, otherwise we call
	 * the Xen-specific one.
	 */
	if (local)
		xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}

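/*
 * Unmap and cache-sync counterparts.  Only the DMA handle is available
 * here, so local vs. foreign is decided with pfn_valid(): in dom0's 1:1
 * mapping a local handle corresponds to a valid PFN, whereas a foreign
 * mfn does not.
 */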
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long pfn = PFN_DOWN(handle);
	/*
	 * Dom0 is mapped 1:1.  A Linux page may span multiple Xen pages,
	 * but it cannot contain a mix of local and foreign Xen pages.
	 * Because of the 1:1 mapping, pfn_valid() on a foreign mfn always
	 * returns false.  If the page is local we can safely call the
	 * native dma_ops function, otherwise we call the Xen-specific one.
	 */
	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->unmap_page)
			xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}

static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
			xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->sync_single_for_device)
			xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}

#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */