linux/arch/openrisc/kernel/dma.c
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

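/*
 * Callback for walk_page_range(): invoked once per PTE, with [addr, next)
 * being the virtual span the entry covers.  Marks the page cache-inhibited
 * and flushes any stale lines out of the data cache so the next access
 * goes straight to memory.  A non-zero return would abort the walk.
 */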
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry		= page_set_nocache,
};

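/*
 * Inverse of page_set_nocache(): clears the cache-inhibit bit.  Only the
 * TLB needs refreshing here; the page was not being cached while
 * _PAGE_CI was set, so there are no stale dcache lines to flush.
 */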
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry		= page_clear_nocache,
};

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls alloc_pages_exact, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cacheable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All currently known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
static void *
or1k_dma_alloc(struct device *dev, size_t size,
	       dma_addr_t *dma_handle, gfp_t gfp,
	       unsigned long attrs)
{
	unsigned long va;
	void *page;

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/*
		 * We need to iterate through the pages, clearing the dcache for
		 * them and setting the cache-inhibit bit.
		 */
		if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
				    NULL)) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return (void *)va;
}
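
/*
 * A driver never calls or1k_dma_alloc() directly; it arrives here through
 * the generic DMA API.  A minimal sketch of the usual call sequence (dev
 * and the buffer size are placeholders):
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, buf, dma);
 */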

static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
	      dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/* walk_page_range shouldn't be able to fail here */
		WARN_ON(walk_page_range(&init_mm, va, va + size,
					&clear_nocache_walk_ops, NULL));
	}

	free_pages_exact(vaddr, size);
}

static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
	      unsigned long offset, size_t size,
	      enum dma_data_direction dir,
	      unsigned long attrs)
{
	unsigned long cl;
	dma_addr_t addr = page_to_phys(page) + offset;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return addr;

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}

	return addr;
}
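
/*
 * Typical streaming use, reached via the generic API (a sketch; dev, buf
 * and len are placeholders):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... the device reads from the buffer ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */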

static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	/* Nothing special to do here... */
}

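/*
 * Map each scatterlist entry independently through or1k_map_page().
 * There is no IOMMU on this architecture, so entries are never merged
 * and the returned count always equals nents.
 */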
static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
	    int nents, enum dma_data_direction dir,
	    unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		/* Pass attrs through so DMA_ATTR_SKIP_CPU_SYNC is honoured */
		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
					       s->length, dir, attrs);
	}

	return nents;
}

static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
	      int nents, enum dma_data_direction dir,
	      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir,
				attrs);
	}
}
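
/*
 * Sketch of the matching driver-side calls (sgl and nents are
 * placeholders):
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!n)
 *		return -EIO;
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */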

static void
or1k_sync_single_for_cpu(struct device *dev,
			 dma_addr_t dma_handle, size_t size,
			 enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	/* Invalidate the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBIR, cl);
}

static void
or1k_sync_single_for_device(struct device *dev,
			    dma_addr_t dma_handle, size_t size,
			    enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	/* Flush the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);
}
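
/*
 * The sync pair is what a driver uses when the CPU touches a streaming
 * buffer between device accesses (a sketch; names are placeholders):
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU reads the freshly DMA'd data ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */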

const struct dma_map_ops or1k_dma_map_ops = {
	.alloc = or1k_dma_alloc,
	.free = or1k_dma_free,
	.map_page = or1k_map_page,
	.unmap_page = or1k_unmap_page,
	.map_sg = or1k_map_sg,
	.unmap_sg = or1k_unmap_sg,
	.sync_single_for_cpu = or1k_sync_single_for_cpu,
	.sync_single_for_device = or1k_sync_single_for_device,
};
EXPORT_SYMBOL(or1k_dma_map_ops);
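
/*
 * The generic DMA API dispatches to this table via get_dma_ops(); on
 * OpenRISC the arch's get_arch_dma_ops() (asm/dma-mapping.h) is assumed
 * to return &or1k_dma_map_ops, so that dma_alloc_coherent() and friends
 * land in the routines above.
 */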