linux/arch/xtensa/kernel/pci-dma.c
/*
 * DMA coherent memory allocation.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/dma-contiguous.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

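/*
 * Apply a cache maintenance routine to a DMA buffer identified by its bus
 * address.  Low memory is reached through its kernel virtual address in a
 * single call; highmem buffers are walked page by page through temporary
 * kmap_atomic() mappings.
 */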
static void do_cache_op(dma_addr_t dma_handle, size_t size,
                        void (*fn)(unsigned long, unsigned long))
{
        unsigned long off = dma_handle & (PAGE_SIZE - 1);
        unsigned long pfn = PFN_DOWN(dma_handle);
        struct page *page = pfn_to_page(pfn);

        if (!PageHighMem(page))
                fn((unsigned long)bus_to_virt(dma_handle), size);
        else
                while (size > 0) {
                        size_t sz = min_t(size_t, size, PAGE_SIZE - off);
                        void *vaddr = kmap_atomic(page);

                        fn((unsigned long)vaddr + off, sz);
                        kunmap_atomic(vaddr);
                        off = 0;
                        ++page;
                        size -= sz;
                }
}

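/*
 * Make a streaming buffer visible to the CPU again: data written by the
 * device (DMA_FROM_DEVICE or DMA_BIDIRECTIONAL) is only correct once the
 * stale cache lines covering it have been invalidated.
 */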
static void xtensa_sync_single_for_cpu(struct device *dev,
                                       dma_addr_t dma_handle, size_t size,
                                       enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_BIDIRECTIONAL:
        case DMA_FROM_DEVICE:
                do_cache_op(dma_handle, size, __invalidate_dcache_range);
                break;

        case DMA_NONE:
                BUG();
                break;

        default:
                break;
        }
}

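/*
 * Hand a streaming buffer to the device: on a writeback D-cache, dirty
 * lines must be written back to memory before the device reads the buffer
 * (DMA_TO_DEVICE or DMA_BIDIRECTIONAL).  A writethrough cache needs no work.
 */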
static void xtensa_sync_single_for_device(struct device *dev,
                                          dma_addr_t dma_handle, size_t size,
                                          enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_BIDIRECTIONAL:
        case DMA_TO_DEVICE:
                if (XCHAL_DCACHE_IS_WRITEBACK)
                        do_cache_op(dma_handle, size, __flush_dcache_range);
                break;

        case DMA_NONE:
                BUG();
                break;

        default:
                break;
        }
}

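/*
 * Scatter-gather variants: apply the single-buffer sync routines above to
 * every entry of the scatterlist.
 */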
static void xtensa_sync_sg_for_cpu(struct device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
                                           sg_dma_len(s), dir);
        }
}

static void xtensa_sync_sg_for_device(struct device *dev,
                                      struct scatterlist *sg, int nents,
                                      enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                xtensa_sync_single_for_device(dev, sg_dma_address(s),
                                              sg_dma_len(s), dir);
        }
}

/*
 * Note: We assume that the full memory space is always mapped to 'kseg'.
 *       Otherwise we have to use page attributes (not implemented).
 */

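/*
 * Coherent allocation: grab pages from CMA when the caller may block,
 * falling back to the page allocator.  Highmem pages are remapped with an
 * uncached vmalloc mapping; lowmem pages are handed out through their KSEG
 * bypass (uncached) alias after invalidating the cached alias.
 */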
static void *xtensa_dma_alloc(struct device *dev, size_t size,
                              dma_addr_t *handle, gfp_t flag,
                              unsigned long attrs)
{
        unsigned long ret;
        unsigned long uncached;
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = NULL;

        /* ignore region specifiers */

        flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                flag |= GFP_DMA;

        if (gfpflags_allow_blocking(flag))
                page = dma_alloc_from_contiguous(dev, count, get_order(size),
                                                 flag & __GFP_NOWARN);

        if (!page)
                page = alloc_pages(flag, get_order(size));

        if (!page)
                return NULL;

        *handle = phys_to_dma(dev, page_to_phys(page));

#ifdef CONFIG_MMU
        if (PageHighMem(page)) {
                void *p;

                p = dma_common_contiguous_remap(page, size, VM_MAP,
                                                pgprot_noncached(PAGE_KERNEL),
                                                __builtin_return_address(0));
                if (!p) {
                        if (!dma_release_from_contiguous(dev, page, count))
                                __free_pages(page, get_order(size));
                }
                return p;
        }
#endif
        ret = (unsigned long)page_address(page);
        BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
               ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

        uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
        __invalidate_dcache_range(ret, size);

        return (void *)uncached;
}

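/*
 * Undo xtensa_dma_alloc(): addresses inside the KSEG bypass window are
 * translated back to their cached alias to find the page; anything else was
 * a highmem remap, so tear down the vmalloc mapping and recover the page
 * from the DMA handle.  Pages go back to CMA or the page allocator.
 */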
static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
                            dma_addr_t dma_handle, unsigned long attrs)
{
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long addr = (unsigned long)vaddr;
        struct page *page;

        if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
            addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
                addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
                page = virt_to_page(addr);
        } else {
#ifdef CONFIG_MMU
                dma_common_free_remap(vaddr, size, VM_MAP);
#endif
                page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
        }

        if (!dma_release_from_contiguous(dev, page, count))
                __free_pages(page, get_order(size));
}

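/*
 * Streaming mappings are a 1:1 translation to the physical address; the
 * only real work is the cache maintenance, which the caller can suppress
 * with DMA_ATTR_SKIP_CPU_SYNC.
 */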
static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir,
                                  unsigned long attrs)
{
        dma_addr_t dma_handle = page_to_phys(page) + offset;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                xtensa_sync_single_for_device(dev, dma_handle, size, dir);

        return dma_handle;
}

static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
                              size_t size, enum dma_data_direction dir,
                              unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
}

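/*
 * Scatter-gather mapping: each entry is mapped on its own; there is no
 * IOMMU, so entries are never merged and nents is returned unchanged.
 */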
static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
                         int nents, enum dma_data_direction dir,
                         unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
                                                 s->length, dir, attrs);
        }
        return nents;
}

static void xtensa_unmap_sg(struct device *dev,
                            struct scatterlist *sg, int nents,
                            enum dma_data_direction dir,
                            unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                xtensa_unmap_page(dev, sg_dma_address(s),
                                  sg_dma_len(s), dir, attrs);
        }
}

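/*
 * xtensa_map_page() returns a plain physical address and has no failure
 * path, so there is never an error value to detect here.
 */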
int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

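/*
 * Drivers do not call these ops directly; they go through the generic DMA
 * API, which dispatches to this table on xtensa.  A typical consumer looks
 * roughly like the sketch below (illustrative only; 'my_dev' and the size
 * are made up for the example):
 *
 *      void *buf;
 *      dma_addr_t handle;
 *
 *      buf = dma_alloc_coherent(my_dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *      if (buf) {
 *              ...
 *              dma_free_coherent(my_dev, PAGE_SIZE, buf, handle);
 *      }
 */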
const struct dma_map_ops xtensa_dma_map_ops = {
        .alloc = xtensa_dma_alloc,
        .free = xtensa_dma_free,
        .map_page = xtensa_map_page,
        .unmap_page = xtensa_unmap_page,
        .map_sg = xtensa_map_sg,
        .unmap_sg = xtensa_unmap_sg,
        .sync_single_for_cpu = xtensa_sync_single_for_cpu,
        .sync_single_for_device = xtensa_sync_single_for_device,
        .sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
        .sync_sg_for_device = xtensa_sync_sg_for_device,
        .mapping_error = xtensa_dma_mapping_error,
};
EXPORT_SYMBOL(xtensa_dma_map_ops);