linux/arch/arc/mm/ioremap.c
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cache.h>

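/*
 * Check if @paddr lies in an address range that the hardware itself treats
 * as uncached: the fixed uncached window on ARCompact, or the peripheral
 * range [perip_base, perip_end] on ARCv2. Such addresses need no MMU
 * mapping to be accessed uncached.
 */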
static inline bool arc_uncached_addr_space(phys_addr_t paddr)
{
        if (is_isa_arcompact()) {
                if (paddr >= ARC_UNCACHED_ADDR_SPACE)
                        return true;
        } else if (paddr >= perip_base && paddr <= perip_end) {
                return true;
        }

        return false;
}

void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
        phys_addr_t end;

        /* Don't allow wraparound or zero size */
        end = paddr + size - 1;
        if (!size || (end < paddr))
                return NULL;

        /*
         * If the region is hardware-uncached, the MMU mapping can be
         * skipped as an optimization.
         * The cast to u32 is fine as this region can only be inside 4GB.
         */
        if (arc_uncached_addr_space(paddr))
                return (void __iomem *)(u32)paddr;

        return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);
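
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * maps a device register window, reads one register, and tears the mapping
 * down again. The physical base address, window size and register offset
 * are all hypothetical.
 */
static u32 __maybe_unused example_read_dev_status(void)
{
        void __iomem *regs = ioremap(0xf0001000, 0x1000);
        u32 status;

        if (!regs)
                return 0;

        status = readl(regs + 0x10);    /* hypothetical STATUS register */
        iounmap(regs);

        return status;
}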

/*
 * ioremap with explicit access flags
 * Cache-semantics-wise it is the same as ioremap: "forced" uncached.
 * However, unlike vanilla ioremap, which bypasses the ARC MMU for addresses
 * in the ARC hardware-uncached region, this one still goes through the MMU,
 * as the caller might need finer-grained access control (R/W/X).
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
                           unsigned long flags)
{
        unsigned long vaddr;
        struct vm_struct *area;
        phys_addr_t off, end;
        pgprot_t prot = __pgprot(flags);

        /* Don't allow wraparound or zero size */
        end = paddr + size - 1;
        if (!size || (end < paddr))
                return NULL;

        /* An early platform driver might end up here */
        if (!slab_is_available())
                return NULL;

        /* force uncached */
        prot = pgprot_noncached(prot);

        /* Mappings have to be page-aligned */
        off = paddr & ~PAGE_MASK;
        paddr &= PAGE_MASK;
        size = PAGE_ALIGN(end + 1) - paddr;
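
        /*
         * Worked example (hypothetical numbers, assuming a 4K PAGE_SIZE):
         * paddr = 0x80002345, size = 0x100 gives end = 0x80002444, so
         * off = 0x345, paddr becomes 0x80002000 and size becomes 0x1000,
         * i.e. one full page; the caller gets vaddr + 0x345 back.
         */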

        /* Ok, go for it.. */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = paddr;
        vaddr = (unsigned long)area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
                vunmap((void __force *)vaddr);
                return NULL;
        }
        return (void __iomem *)(off + (char __iomem *)vaddr);
}
EXPORT_SYMBOL(ioremap_prot);
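
/*
 * Usage sketch (illustrative only, not part of the original file): map a
 * window with the protection flags spelled out explicitly; @base is a
 * hypothetical physical address. ioremap_prot() re-applies
 * pgprot_noncached() internally, so even a cacheable prot passed here
 * still ends up as an uncached mapping.
 */
static void __iomem * __maybe_unused example_map_with_prot(phys_addr_t base)
{
        return ioremap_prot(base, 0x1000,
                            pgprot_val(pgprot_noncached(PAGE_KERNEL)));
}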

void iounmap(const void __iomem *addr)
{
        /*
         * For hardware-uncached addresses, ioremap() returned the physical
         * address itself, so there is no MMU mapping to tear down.
         * The double cast (pointer -> u32 -> phys_addr_t) avoids a
         * size-mismatch warning when phys_addr_t is wider than 32 bits.
         */
        if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
                return;

        vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
}
EXPORT_SYMBOL(iounmap);