// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arc/mm/ioremap.c
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cache.h>

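/*
 * Check whether @paddr lies in a hardware-uncached address region: the
 * fixed window at/above ARC_UNCACHED_ADDR_SPACE on ARCompact, or the
 * runtime-probed peripheral range [perip_base, perip_end] on ARCv2.
 * Such addresses are uncached by the hardware itself, so no MMU mapping
 * is needed to access them uncached.
 */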
static inline bool arc_uncached_addr_space(phys_addr_t paddr)
{
        if (is_isa_arcompact()) {
                if (paddr >= ARC_UNCACHED_ADDR_SPACE)
                        return true;
        } else if (paddr >= perip_base && paddr <= perip_end) {
                return true;
        }

        return false;
}

void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
        phys_addr_t end;

        /* Don't allow wraparound or zero size */
        end = paddr + size - 1;
        if (!size || (end < paddr))
                return NULL;

        /*
         * If the region is h/w uncached, the MMU mapping can be elided as an
         * optimization. The cast to u32 is fine as this region can only lie
         * inside 4GB.
         */
        if (arc_uncached_addr_space(paddr))
                return (void __iomem *)(u32)paddr;

        return ioremap_prot(paddr, size,
                            pgprot_val(pgprot_noncached(PAGE_KERNEL)));
}
EXPORT_SYMBOL(ioremap);
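
/*
 * Example (hypothetical driver snippet, not part of this file): a typical
 * caller maps a device register window and uses the MMIO accessors on the
 * returned cookie rather than dereferencing it directly:
 *
 *      void __iomem *regs = ioremap(res->start, resource_size(res));
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(val, regs + CTRL_REG);
 *      iounmap(regs);
 *
 * Here res, val and CTRL_REG are assumed to come from the caller. On ARC,
 * if res->start sits in the hardware uncached window, the cookie is just
 * the physical address and iounmap() on it is a no-op.
 */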

/*
 * ioremap with access flags
 * Cache semantics wise it is the same as ioremap - "forced" uncached.
 * However, unlike vanilla ioremap which bypasses the ARC MMU for addresses
 * in the ARC hardware uncached region, this one still goes through the MMU,
 * as the caller might need finer-grained access control (R/W/X).
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
                           unsigned long flags)
{
        unsigned int off;
        unsigned long vaddr;
        struct vm_struct *area;
        phys_addr_t end;
        pgprot_t prot = __pgprot(flags);

        /* Don't allow wraparound or zero size */
        end = paddr + size - 1;
        if (!size || (end < paddr))
                return NULL;

        /* An early platform driver might end up here */
        if (!slab_is_available())
                return NULL;

        /* force uncached */
        prot = pgprot_noncached(prot);

        /* Mappings have to be page-aligned */
        off = paddr & ~PAGE_MASK;
        paddr &= PAGE_MASK_PHYS;
        size = PAGE_ALIGN(end + 1) - paddr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = paddr;
        vaddr = (unsigned long)area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
                vunmap((void __force *)vaddr);
                return NULL;
        }
        return (void __iomem *)(off + (char __iomem *)vaddr);
}
EXPORT_SYMBOL(ioremap_prot);
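
/*
 * Example (hypothetical, for illustration): vanilla ioremap() boils down
 * to the following call, and a caller wanting different R/W/X semantics
 * would pass a correspondingly restricted pgprot value instead:
 *
 *      void __iomem *base = ioremap_prot(paddr, size,
 *                              pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 *
 * Note that uncached-ness is forced by this function regardless of @flags,
 * so only the access-control bits of the passed prot vary the outcome.
 */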
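/*
 * Addresses in the hardware uncached window were handed out by ioremap()
 * as-is, without a vmalloc mapping behind them, so there is nothing to
 * tear down for those; only MMU-mapped regions are vfree'd.
 */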
void iounmap(const void __iomem *addr)
{
        /* weird double cast to handle phys_addr_t > 32 bits */
        if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
                return;

        vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
}
EXPORT_SYMBOL(iounmap);