linux/arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>	/* dma_debug_init() */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

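/*
 * Translate a bus/DMA address back to the struct page backing it, using
 * the platform's dma_addr -> physical address hook.  The sync and unmap
 * paths below use this to find the cache lines needing maintenance.
 */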
static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * A note on terminology: Linux calls an uncached area "coherent", while
 * in MIPS usage "coherent" means a memory area whose coherency is
 * maintained by hardware.
 */

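/*
 * R10000 and R12000 cores execute loads and stores speculatively, which
 * can pull lines of an in-flight DMA buffer back into the cache.  On
 * noncoherent systems built around them, buffers therefore need cache
 * maintenance again when ownership returns to the CPU (unmap /
 * sync_for_cpu), not just at map time.
 */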
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000);
}

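/*
 * Rewrite the caller's GFP flags so the allocation lands in a zone the
 * device can address: strip any zone modifiers that were passed in,
 * then pick __GFP_DMA or __GFP_DMA32 from the device's
 * coherent_dma_mask (e.g. a mask below 32 bits selects ZONE_DMA when
 * both DMA zones are configured).
 */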
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}

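/*
 * Allocate zeroed pages for DMA and return their cached kernel virtual
 * address; the bus address is returned through @dma_handle.  Callers of
 * the "noncoherent" API are responsible for their own cache
 * maintenance.
 */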
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);

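/*
 * "Coherent" allocation in the Linux sense: on platforms without
 * hardware-maintained coherency, flush the buffer from the cache and
 * hand out its uncached (UNCAC_ADDR) alias so CPU and device always see
 * the same data.
 */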
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);

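/*
 * Free a buffer from mips_dma_alloc_coherent().  On noncoherent
 * platforms the uncached alias handed to the caller is first converted
 * back (CAC_ADDR) to the cached address that free_pages() expects.
 */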
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, order);
}

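/*
 * Do the cache maintenance matching the DMA direction on a virtually
 * contiguous buffer: write back before the device reads, invalidate
 * before the CPU reads, write back and invalidate for bidirectional
 * transfers.
 */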
static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages.  But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else {
			/*
			 * Lowmem is linearly mapped, so sync the remaining
			 * chunk in one go: len (what is left), not the
			 * original size.
			 */
			__dma_sync_virtual(page_address(page) + offset,
					   len, direction);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

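/*
 * On buffer reclaim only the speculating R10000/R12000 parts need a
 * sync here; other noncoherent CPUs did all the required maintenance
 * when the buffer was mapped.
 */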
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

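/*
 * Map each scatterlist entry: perform the direction-appropriate cache
 * maintenance on noncoherent platforms, then record the bus address the
 * device should use for that entry.
 */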
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}

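/*
 * Map a single page/offset range: sync the cache first on noncoherent
 * platforms so the device never sees stale lines, then form the bus
 * address from the page's platform mapping.
 */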
static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}

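/*
 * Unmap a scatterlist.  DMA_FROM_DEVICE and DMA_BIDIRECTIONAL entries
 * are synced so the CPU sees what the device wrote; DMA_TO_DEVICE needs
 * no further cache work.
 */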
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

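/*
 * sync_single_for_cpu only matters on the speculating R10000/R12000;
 * sync_single_for_device must flush on every noncoherent platform so
 * the device observes the CPU's latest writes.
 */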
static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* On coherent platforms the body is empty and gcc drops the loop. */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
	}
}

static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* On coherent platforms the body is empty and gcc drops the loop. */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
	}
}

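/*
 * Error reporting and DMA mask validation are delegated entirely to the
 * platform hooks.
 */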
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
			 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);

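/*
 * The default dma_map_ops table.  Platforms with special DMA needs
 * (Octeon, for example) point mips_dma_map_ops at their own table
 * instead.
 */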
static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

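/* Entries preallocated for the CONFIG_DMA_API_DEBUG tracking code. */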
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);