linux/arch/microblaze/mm/consistent.c
/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 *
 * Based on PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 * Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/dma-noncoherent.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU
/* I have to use dcache values because I can't rely on the RAM size */
# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#endif
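
/*
 * Worked example with hypothetical numbers (illustrative only): if the
 * cacheable window is dcache_base = 0x80000000 .. dcache_high = 0x8fffffff
 * (a 256 MB window), then UNCACHED_SHADOW_MASK = 0x10000000, and a buffer
 * at 0x80123000 is also visible, uncached, at the shadow alias 0x90123000.
 */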

/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple. In the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region.  So, memory accessed
 * in this mirror region will not be cached.  It's allocated from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region.  This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
{
        unsigned long order, vaddr;
        void *ret;
        unsigned int i, err = 0;
        struct page *page, *end;

#ifdef CONFIG_MMU
        phys_addr_t pa;
        struct vm_struct *area;
        unsigned long va;
#endif

        if (in_interrupt())
                BUG();

        /* Only allocate page size areas. */
        size = PAGE_ALIGN(size);
        order = get_order(size);

        vaddr = __get_free_pages(gfp, order);
        if (!vaddr)
                return NULL;

        /*
         * we need to ensure that there are no cachelines in use,
         * or worse dirty in this area.
         */
        flush_dcache_range(virt_to_phys((void *)vaddr),
                                        virt_to_phys((void *)vaddr) + size);

#ifndef CONFIG_MMU
        ret = (void *)vaddr;
        /*
         * Here's the magic!  Note if the uncached shadow is not implemented,
         * it's up to the calling code to also test that condition and make
         * other arrangements, such as manually flushing the cache and so on.
         */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
        ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
        if ((unsigned int)ret > cpuinfo.dcache_base &&
                                (unsigned int)ret < cpuinfo.dcache_high)
                pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");

        /* dma_handle is same as physical (shadowed) address */
        *dma_handle = (dma_addr_t)ret;
#else
        /* Allocate some common virtual space to map the new pages. */
        area = get_vm_area(size, VM_ALLOC);
        if (!area) {
                free_pages(vaddr, order);
                return NULL;
        }
        va = (unsigned long) area->addr;
        ret = (void *)va;

        /* This gives us the real physical address of the first page. */
        *dma_handle = pa = __virt_to_phys(vaddr);
#endif

        /*
         * free wasted pages.  We skip the first page since we know
         * that it will have count = 1 and won't require freeing.
         * We also mark the pages in use as reserved so that
         * remap_page_range works.
         */
        page = virt_to_page(vaddr);
        end = page + (1 << order);

        split_page(page, order);

        for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
                /* MS: This is the whole magic - use cache inhibit pages */
                err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif

                SetPageReserved(page);
                page++;
        }

        /* Free the otherwise unused pages. */
        while (page < end) {
                __free_page(page);
                page++;
        }

        if (err) {
                free_pages(vaddr, order);
                return NULL;
        }

        return ret;
}

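/*
 * Usage sketch (illustrative only, not part of this file): drivers do not
 * call arch_dma_alloc() directly; they go through the generic DMA API,
 * which lands here on a non-coherent MicroBlaze system. A hypothetical
 * driver grabbing a page-sized coherent buffer might do:
 *
 *        dma_addr_t ring_dma;
 *        void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *
 *        if (!ring)
 *                return -ENOMEM;
 *        ...
 *        dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 *
 * "ring" and "ring_dma" are made-up names; the CPU uses the returned
 * virtual address while the device is programmed with the dma_addr_t
 * handle, and dma_free_coherent() ends up in arch_dma_free() below.
 */
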
#ifdef CONFIG_MMU
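/*
 * Walk the kernel page tables and return the pte that backs a
 * consistent-mapping virtual address.
 */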
static pte_t *consistent_virt_to_pte(void *vaddr)
{
        unsigned long addr = (unsigned long)vaddr;

        return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
}

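/*
 * Translate a coherent kernel virtual address back to its pfn. The generic
 * DMA code uses this, e.g. when remapping a coherent buffer into user
 * space. Returns 0 if nothing is currently mapped at that address.
 */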
long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
                dma_addr_t dma_addr)
{
        pte_t *ptep = consistent_virt_to_pte(vaddr);

        if (pte_none(*ptep) || !pte_present(*ptep))
                return 0;

        return pte_pfn(*ptep);
}
#endif
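
/*
 * Illustrative follow-on (hypothetical driver code, not built here): the
 * pfn lookup above is what generic helpers such as dma_mmap_coherent()
 * can rely on when a driver exposes a coherent buffer through its mmap
 * handler, e.g.:
 *
 *        return dma_mmap_coherent(dev, vma, ring, ring_dma, PAGE_SIZE);
 *
 * where "ring" and "ring_dma" come from dma_alloc_coherent() as sketched
 * after arch_dma_alloc() above.
 */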

/*
 * Free page(s) as defined by the above mapping.
 */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_addr, unsigned long attrs)
{
        struct page *page;

        if (in_interrupt())
                BUG();

        size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
        /* Clear the UNCACHED_SHADOW_MASK bits in the address and free as per usual */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
        vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
        page = virt_to_page(vaddr);

        do {
                __free_reserved_page(page);
                page++;
        } while (size -= PAGE_SIZE);
#else
        /* Walk the mapping page by page, clear each pte and release the page. */
        do {
                pte_t *ptep = consistent_virt_to_pte(vaddr);
                unsigned long pfn;

                if (!pte_none(*ptep) && pte_present(*ptep)) {
                        pfn = pte_pfn(*ptep);
                        pte_clear(&init_mm, (unsigned int)vaddr, ptep);
                        if (pfn_valid(pfn)) {
                                page = pfn_to_page(pfn);
                                __free_reserved_page(page);
                        }
                }
                vaddr += PAGE_SIZE;
        } while (size -= PAGE_SIZE);

        /* flush tlb */
        flush_tlb_all();
#endif
}