linux/arch/microblaze/mm/consistent.c
/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 *
 * Based on PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 * Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU
/* I have to use dcache values because I can't rely on the RAM size */
# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#endif

/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple. In the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region.  So, memory accessed
 * in this mirror region will not be cached.  It's allocated from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region.  This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
	unsigned long order, vaddr;
	void *ret;
	unsigned int i, err = 0;
	struct page *page, *end;

#ifdef CONFIG_MMU
	phys_addr_t pa;
	struct vm_struct *area;
	unsigned long va;
#endif

	if (in_interrupt())
		BUG();

	/* Only allocate page size areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	vaddr = __get_free_pages(gfp, order);
	if (!vaddr)
		return NULL;

	/*
	 * we need to ensure that there are no cachelines in use,
	 * or worse, dirty, in this area.
	 */
	flush_dcache_range(virt_to_phys((void *)vaddr),
					virt_to_phys((void *)vaddr) + size);

#ifndef CONFIG_MMU
	ret = (void *)vaddr;
	/*
	 * Here's the magic!  Note if the uncached shadow is not implemented,
	 * it's up to the calling code to also test that condition and make
	 * other arrangements, such as manually flushing the cache and so on.
	 */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
	if ((unsigned int)ret > cpuinfo.dcache_base &&
				(unsigned int)ret < cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");

	/* dma_handle is same as physical (shadowed) address */
	*dma_handle = (dma_addr_t)ret;
#else
	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(vaddr, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = virt_to_bus((void *)vaddr);
#endif

	/*
	 * free wasted pages.  We skip the first page since we know
	 * that it will have count = 1 and won't require freeing.
	 * We also mark the pages in use as reserved so that
	 * remap_page_range works.
	 */
	page = virt_to_page(vaddr);
	end = page + (1 << order);

	split_page(page, order);

	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
		/* MS: This is the whole magic - use cache inhibit pages */
		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif

		SetPageReserved(page);
		page++;
	}

	/* Free the otherwise unused pages. */
	while (page < end) {
		__free_page(page);
		page++;
	}

	if (err) {
		free_pages(vaddr, order);
		return NULL;
	}

	return ret;
}
EXPORT_SYMBOL(consistent_alloc);
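
/*
 * Usage sketch (illustration only, kept out of the build with #if 0): a
 * hypothetical driver might obtain a coherent buffer roughly like this.
 * The example_* names and the one-page size are made up for the sketch.
 */
#if 0
static void *example_buf;
static dma_addr_t example_dma;

static int example_setup(void)
{
	/* one uncached page shared between the CPU and the device */
	example_buf = consistent_alloc(GFP_KERNEL, PAGE_SIZE, &example_dma);
	if (!example_buf)
		return -ENOMEM;

	/* program example_dma into the device; the CPU uses example_buf */
	return 0;
}
#endif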

/*
 * free page(s) as defined by the above mapping.
 */
void consistent_free(size_t size, void *vaddr)
{
	struct page *page;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
	/* Clear SHADOW_MASK bit in address, and free as per usual */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
	page = virt_to_page(vaddr);

	do {
		ClearPageReserved(page);
		__free_page(page);
		page++;
	} while (size -= PAGE_SIZE);
#else
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
						(unsigned int)vaddr),
					(unsigned int)vaddr),
				(unsigned int)vaddr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);

				ClearPageReserved(page);
				__free_page(page);
			}
		}
		vaddr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	/* flush tlb */
	flush_tlb_all();
#endif
}
EXPORT_SYMBOL(consistent_free);
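
/*
 * Teardown sketch (illustration only, kept out of the build with #if 0),
 * pairing with the example_setup() sketch above; example_buf is the same
 * made-up name.  The size must match the one passed to consistent_alloc().
 */
#if 0
static void example_teardown(void)
{
	consistent_free(PAGE_SIZE, example_buf);
	example_buf = NULL;
}
#endif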

/*
 * make an area consistent.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)vaddr;

	/* Convert start address back down to unshadowed memory region */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	start &= ~UNCACHED_SHADOW_MASK;
#endif
	end = start + size;

	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:	/* invalidate only */
		invalidate_dcache_range(start, end);
		break;
	case PCI_DMA_TODEVICE:		/* writeback only */
		flush_dcache_range(start, end);
		break;
	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(consistent_sync);
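
/*
 * Sketch (illustration only, kept out of the build with #if 0): how a caller
 * might bracket a DMA transfer with consistent_sync() on an ordinary cached
 * buffer.  The buffer and length names are made up for the sketch.
 */
#if 0
static void example_stream(void *buf, size_t len)
{
	/* CPU filled buf; write the cache back before the device reads it */
	consistent_sync(buf, len, PCI_DMA_TODEVICE);

	/* ... start the device and wait for it to write its reply into buf ... */

	/* device wrote buf; drop stale cachelines before the CPU reads it */
	consistent_sync(buf, len, PCI_DMA_FROMDEVICE);
}
#endif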

/*
 * consistent_sync_page makes memory consistent: identical to
 * consistent_sync, but takes a struct page instead of a virtual address.
 */
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
	unsigned long start = (unsigned long)page_address(page) + offset;
	consistent_sync((void *)start, size, direction);
}
EXPORT_SYMBOL(consistent_sync_page);
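
/*
 * Sketch (illustration only, kept out of the build with #if 0): the
 * page-based variant, for data that sits in a struct page; the names
 * below are made up for the sketch.
 */
#if 0
static void example_sync_page(struct page *pg, unsigned long off, size_t len)
{
	/* same effect as consistent_sync(page_address(pg) + off, len, ...) */
	consistent_sync_page(pg, off, len, PCI_DMA_TODEVICE);
}
#endif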