linux/arch/powerpc/kernel/dma.c
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
#include <asm/swiotlb.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
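
/*
 * Illustrative sketch, not part of the original file: platform fixup
 * code could install a per-device offset with the powerpc
 * set_dma_offset() helper, which stores it in archdata.dma_data. The
 * hook name and offset value here are hypothetical, for a bridge that
 * makes system RAM visible to devices at bus address 0x80000000:
 *
 *      static void myboard_pci_dma_dev_setup(struct pci_dev *pdev)
 *      {
 *              set_dma_offset(&pdev->dev, 0x80000000ull);
 *      }
 */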

/*
 * Return one past the highest page frame number the device can address,
 * derived from its coherent DMA mask (and clamped to the swiotlb direct
 * window when swiotlb ops are in use).
 */
static u64 __maybe_unused get_pfn_limit(struct device *dev)
{
        u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
        struct dev_archdata __maybe_unused *sd = &dev->archdata;

#ifdef CONFIG_SWIOTLB
        if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops)
                pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif

        return pfn;
}
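
/*
 * Worked example (assuming 4K pages): a 31-bit coherent mask of
 * 0x7fffffff gives (0x7fffffff >> PAGE_SHIFT) + 1 = 0x80000, i.e. the
 * limit falls exactly on the 2GB boundary.
 */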

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag,
                                struct dma_attrs *attrs)
{
        void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
        ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
        if (ret == NULL)
                return NULL;
        *dma_handle += get_dma_offset(dev);
        return ret;
#else
        struct page *page;
        int node = dev_to_node(dev);
#ifdef CONFIG_FSL_SOC
        u64 pfn = get_pfn_limit(dev);
        int zone;

        /*
         * This code should be OK on other platforms, but we have drivers that
         * don't set coherent_dma_mask. As a workaround we just ifdef it. This
         * whole routine needs some serious cleanup.
         */

        zone = dma_pfn_limit_to_zone(pfn);
        if (zone < 0) {
                dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
                        __func__, pfn);
                return NULL;
        }

        switch (zone) {
        case ZONE_DMA:
                flag |= GFP_DMA;
                break;
#ifdef CONFIG_ZONE_DMA32
        case ZONE_DMA32:
                flag |= GFP_DMA32;
                break;
#endif
        }
#endif /* CONFIG_FSL_SOC */

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = __pa(ret) + get_dma_offset(dev);

        return ret;
#endif
}
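
/*
 * Consumer-side sketch, for reference only: drivers never call the
 * routine above directly; they go through the generic API, which
 * dispatches to dma_direct_ops on directly mapped busses (device and
 * size below are illustrative):
 *
 *      void *buf;
 *      dma_addr_t bus;
 *
 *      buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, bus);
 */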

void dma_direct_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle,
                              struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
                             struct dma_attrs *attrs)
{
        unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
        pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
        return remap_pfn_range(vma, vma->vm_start,
                               pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        /* Nothing to tear down: the mapping is a fixed offset. */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        /* Could be improved so platforms can set the limit in case
         * they have limited DMA windows
         */
        return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
        return 1;
#endif
}
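
/*
 * Worked example with hypothetical numbers: with 512MB of RAM and a
 * zero DMA offset, memblock_end_of_DRAM() - 1 is 0x1fffffff, so a
 * 32-bit mask passes the check above while a 24-bit mask is refused.
 */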

static u64 dma_direct_get_required_mask(struct device *dev)
{
        u64 end, mask;

        end = memblock_end_of_DRAM() + get_dma_offset(dev);

        mask = 1ULL << (fls64(end) - 1);
        mask += mask - 1;

        return mask;
}
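
/*
 * The mask arithmetic above rounds the highest bus address up to an
 * all-ones mask: for example, end = 0x100000000 (4GB of RAM, zero
 * offset) gives fls64(end) = 33, so mask becomes 1ULL << 32 and then
 * 0x1ffffffff, i.e. a 33-bit DMA mask.
 */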

static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction dir,
                                             struct dma_attrs *attrs)
{
        BUG_ON(dir == DMA_NONE);
        __dma_sync_page(page, offset, size, dir);
        return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        /* Nothing to tear down: the mapping is a fixed offset. */
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
                                          dma_addr_t dma_handle, size_t size,
                                          enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
        .alloc                          = dma_direct_alloc_coherent,
        .free                           = dma_direct_free_coherent,
        .mmap                           = dma_direct_mmap_coherent,
        .map_sg                         = dma_direct_map_sg,
        .unmap_sg                       = dma_direct_unmap_sg,
        .dma_supported                  = dma_direct_dma_supported,
        .map_page                       = dma_direct_map_page,
        .unmap_page                     = dma_direct_unmap_page,
        .get_required_mask              = dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu            = dma_direct_sync_single,
        .sync_single_for_device         = dma_direct_sync_single,
        .sync_sg_for_cpu                = dma_direct_sync_sg,
        .sync_sg_for_device             = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);
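
/*
 * Sketch of how platform code attaches these ops to a device (the
 * device pointer is illustrative); set_dma_ops() and set_dma_offset()
 * are the powerpc helpers from asm/dma-mapping.h:
 *
 *      set_dma_ops(&pdev->dev, &dma_direct_ops);
 *      set_dma_offset(&pdev->dev, PCI_DRAM_OFFSET);
 */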

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int __dma_set_mask(struct device *dev, u64 dma_mask)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
                return dma_ops->set_dma_mask(dev, dma_mask);
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}

int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (ppc_md.dma_set_mask)
                return ppc_md.dma_set_mask(dev, dma_mask);
        return __dma_set_mask(dev, dma_mask);
}
EXPORT_SYMBOL(dma_set_mask);
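
/*
 * Driver-side sketch (device pointer is illustrative): a driver
 * negotiates its addressing capability through this call, falling back
 * to a narrower mask if the wide one is refused:
 *
 *      if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *              if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *                      return -EIO;
 */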

u64 __dma_get_required_mask(struct device *dev)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (unlikely(dma_ops == NULL))
                return 0;

        if (dma_ops->get_required_mask)
                return dma_ops->get_required_mask(dev);

        return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}

u64 dma_get_required_mask(struct device *dev)
{
        if (ppc_md.dma_get_required_mask)
                return ppc_md.dma_get_required_mask(dev);

        return __dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
        dma_debug_add_bus(&vio_bus_type);
#endif

        return 0;
}
fs_initcall(dma_init);