linux/arch/openrisc/kernel/dma.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 */

#include <linux/dma-map-ops.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

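/*
 * walk_page_range() callbacks used below: page_set_nocache() marks a kernel
 * page as cache-inhibited and flushes the TLB entry and dcache lines so the
 * change takes effect; page_clear_nocache() undoes that.
 */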
static int
page_set_nocache(pte_t *pte, unsigned long addr,
                 unsigned long next, struct mm_walk *walk)
{
        unsigned long cl;
        struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

        pte_val(*pte) |= _PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_page(NULL, addr);

        /* Flush page out of dcache */
        for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
                mtspr(SPR_DCBFR, cl);

        return 0;
}

static const struct mm_walk_ops set_nocache_walk_ops = {
        .pte_entry              = page_set_nocache,
};

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
                   unsigned long next, struct mm_walk *walk)
{
        pte_val(*pte) &= ~_PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_page(NULL, addr);

        return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
        .pte_entry              = page_clear_nocache,
};

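/*
 * Called from the generic dma-direct allocator (this architecture selects
 * ARCH_HAS_DMA_SET_UNCACHED) to make the kernel mapping of a freshly
 * allocated coherent buffer cache-inhibited before it is handed to a driver.
 * Returns the same virtual address on success or an ERR_PTR() on failure.
 */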
void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
        unsigned long va = (unsigned long)cpu_addr;
        int error;

        /*
         * We need to iterate through the pages, clearing the dcache for
         * them and setting the cache-inhibit bit.
         */
        mmap_read_lock(&init_mm);
        error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
                        NULL);
        mmap_read_unlock(&init_mm);

        if (error)
                return ERR_PTR(error);
        return cpu_addr;
}

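/*
 * Undo arch_dma_set_uncached() when the coherent buffer is freed: restore
 * the normal, cacheable attributes on the kernel mapping.
 */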
void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
        unsigned long va = (unsigned long)cpu_addr;

        mmap_read_lock(&init_mm);
        /* walk_page_range shouldn't be able to fail here */
        WARN_ON(walk_page_range(&init_mm, va, va + size,
                        &clear_nocache_walk_ops, NULL));
        mmap_read_unlock(&init_mm);
}

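/*
 * Cache maintenance for streaming DMA mappings (dma_map_single() and
 * friends): write dirty lines back so the device reads up-to-date data
 * (DMA_TO_DEVICE), or invalidate the range so the CPU does not later read
 * stale cached data over what the device writes (DMA_FROM_DEVICE).
 */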
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        unsigned long cl;
        struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

        switch (dir) {
        case DMA_TO_DEVICE:
                /* Flush the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo->dcache_block_size)
                        mtspr(SPR_DCBFR, cl);
                break;
        case DMA_FROM_DEVICE:
                /* Invalidate the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo->dcache_block_size)
                        mtspr(SPR_DCBIR, cl);
                break;
        default:
                /*
                 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
                 * flush nor invalidate the cache here as the area will need
                 * to be manually synced anyway.
                 */
                break;
        }
}
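/*
 * Illustrative only -- a sketch of how a driver typically reaches the hooks
 * above through the generic DMA API.  It is guarded by #if 0 because it is
 * not part of this file; example_setup() and its device pointer are made-up
 * names for the example, only the dma_*() calls are real kernel APIs.
 */
#if 0
#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int example_setup(struct device *dev)
{
        dma_addr_t ring_dma, buf_dma;
        void *ring;
        void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        /*
         * Coherent allocation: on a non-coherent OpenRISC system dma-direct
         * typically remaps the buffer via arch_dma_set_uncached() above.
         */
        ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
        if (!ring) {
                kfree(buf);
                return -ENOMEM;
        }

        /*
         * Streaming mapping: arch_sync_dma_for_device() flushes the dcache
         * for the buffer before the device reads from it.
         */
        buf_dma = dma_map_single(dev, buf, PAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, buf_dma)) {
                dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
                kfree(buf);
                return -ENOMEM;
        }

        /* ... program the device with ring_dma and buf_dma ... */

        dma_unmap_single(dev, buf_dma, PAGE_SIZE, DMA_TO_DEVICE);
        dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
        kfree(buf);
        return 0;
}
#endif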