linux/arch/powerpc/kernel/dma-iommu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <linux/export.h>
#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

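/*
 * Platform IOMMU setup code attaches a device to these ops; the usual
 * pattern looks roughly like the following sketch (the real call sites
 * live in the platform code, e.g. the pseries and powernv PCI setup):
 *
 *      set_iommu_table_base(&pdev->dev, tbl);
 *      set_dma_ops(&pdev->dev, &dma_iommu_ops);
 */
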
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flag,
                                      unsigned long attrs)
{
        return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
                                    dma_handle, dev->coherent_dma_mask, flag,
                                    dev_to_node(dev));
}

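/* Tears down the mapping created by dma_iommu_alloc_coherent and
 * frees the underlying buffer.
 */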
static void dma_iommu_free_coherent(struct device *dev, size_t size,
                                    void *vaddr, dma_addr_t dma_handle,
                                    unsigned long attrs)
{
        iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
                                     unsigned long attrs)
{
        return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
                              size, device_to_mask(dev), direction, attrs);
}

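/* Undoes a mapping created by dma_iommu_map_page. */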
static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                 size_t size, enum dma_data_direction direction,
                                 unsigned long attrs)
{
        iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
                         attrs);
}

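/* Creates TCEs for each entry of a user provided scatter/gather list. */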
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
                            unsigned long attrs)
{
        return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
                                device_to_mask(dev), direction, attrs);
}

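/* Tears down the TCEs created by dma_iommu_map_sg. */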
static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
                int nelems, enum dma_data_direction direction,
                unsigned long attrs)
{
        ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
                           direction, attrs);
}

/* We support DMA to/from any memory page via the iommu */
int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);

        if (!tbl) {
                dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n",
                         mask);
                return 0;
        }

        if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
                dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
                dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
                                mask, tbl->it_offset << tbl->it_page_shift);
                return 0;
        } else
                return 1;
}

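/* The "required" mask covers the largest DMA address the device can be
 * handed back: the top of the table's window (it_offset + it_size, in
 * IOMMU pages) converted to a byte address and rounded up to a
 * power-of-two boundary, minus one.
 */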
static u64 dma_iommu_get_required_mask(struct device *dev)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);
        u64 mask;

        if (!tbl)
                return 0;

        /* it_offset and it_size count IOMMU pages, hence it_page_shift */
        mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
                        tbl->it_page_shift - 1);
        mask += mask - 1;

        return mask;
}

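/* The powerpc iommu layer hands back IOMMU_MAPPING_ERROR on failure */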
int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == IOMMU_MAPPING_ERROR;
}

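/* DMA API callbacks for devices whose DMA goes through an iommu table */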
const struct dma_map_ops dma_iommu_ops = {
        .alloc                  = dma_iommu_alloc_coherent,
        .free                   = dma_iommu_free_coherent,
        .mmap                   = dma_direct_mmap_coherent,
        .map_sg                 = dma_iommu_map_sg,
        .unmap_sg               = dma_iommu_unmap_sg,
        .dma_supported          = dma_iommu_dma_supported,
        .map_page               = dma_iommu_map_page,
        .unmap_page             = dma_iommu_unmap_page,
        .get_required_mask      = dma_iommu_get_required_mask,
        .mapping_error          = dma_iommu_mapping_error,
};
EXPORT_SYMBOL(dma_iommu_ops);