/*
 * arch/sh/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005 - 2010  Paul Mundt
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/io_trapped.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
#include "ioremap.h"

/*
 * On 32-bit SH, we traditionally have the whole physical address space mapped
 * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to
 * do anything but place the address in the proper segment.  This is true for
 * P1 and P2 addresses, as well as some P3 ones.  However, most of the P3
 * addresses, and all addresses on newer cores using extended (32-bit)
 * addressing, have to be mapped through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 */
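
/*
 * For reference, classic 29-bit mode carves the virtual space into fixed
 * segments (per the SH-3/SH-4 manuals; listed here as a sketch only):
 *
 *	P0/U0	0x00000000 - 0x7fffffff	TLB-translated
 *	P1	0x80000000 - 0x9fffffff	unmapped, cacheable
 *	P2	0xa0000000 - 0xbfffffff	unmapped, uncached
 *	P3	0xc0000000 - 0xdfffffff	TLB-translated
 *	P4	0xe0000000 - 0xffffffff	control space
 *
 * The P*SEGADDR() helpers used below simply fold a 29-bit physical address
 * into the corresponding window, roughly (phys & 0x1fffffff) | P1SEG.
 */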
#ifdef CONFIG_29BIT
static void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached accesses to P1 addresses are done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		u64 flags = pgprot_val(prot);

		/*
		 * Anything using the legacy PTEA space attributes needs
		 * to be kicked down to page table mappings.
		 */
		if (unlikely(flags & _PAGE_PCC_MASK))
			return NULL;
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 addresses above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);

	return NULL;
}
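
/*
 * Example (sketch only, device address made up): remapping registers at
 * physical 0x1f000000 with a cacheable pgprot returns the P1 alias
 * 0x9f000000, while an uncached pgprot returns the P2 alias 0xbf000000;
 * neither case touches the page tables.
 */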
#else
#define __ioremap_29bit(offset, size, prot)		NULL
#endif /* CONFIG_29BIT */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ref
__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
		 pgprot_t pgprot, void *caller)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;
	void __iomem *mapped;

	mapped = __ioremap_trapped(phys_addr, size);
	if (mapped)
		return mapped;

	mapped = __ioremap_29bit(phys_addr, size, pgprot);
	if (mapped)
		return mapped;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * If we can't yet use the regular approach, go the fixmap route.
	 */
	if (!mem_init_done)
		return ioremap_fixed(phys_addr, size, pgprot);

	/*
	 * First try to remap through the PMB.
	 * PMB entries are all pre-faulted.
	 */
	mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
	if (mapped && !IS_ERR(mapped))
		return mapped;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
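
	/*
	 * Worked example (sketch, assuming 4 KiB pages): for a request of
	 * phys_addr = 0xfd000123 and size = 0x10 we end up with
	 * offset = 0x123, phys_addr = 0xfd000000 and size = 0x1000, and
	 * the returned cookie is the new mapping plus 0x123.
	 */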

	/*
	 * OK, go for it.
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

	if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
		vunmap((void *)orig_addr);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap_caller);
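
/*
 * Typical caller flow (sketch only; "res", "regs" and REG_STATUS are
 * made-up names, not part of this file):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	val = readl(regs + REG_STATUS);
 *	...
 *	iounmap(regs);
 *
 * On SH, ioremap() resolves to __ioremap_caller() above, so the returned
 * cookie may be a plain P1/P2 segment alias on 29-bit parts rather than a
 * vmalloc-range address; iounmap() below copes with both.
 */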

/*
 * Simple checks for non-translatable mappings.
 */
static inline int iomapping_nontranslatable(unsigned long offset)
{
#ifdef CONFIG_29BIT
	/*
	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
	 * parts of P3.
	 */
	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
		return 1;
#endif

	return 0;
}

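/**
 * iounmap - undo a mapping set up by ioremap() or __ioremap_caller()
 * @addr: cookie previously returned by the mapping call
 *
 * The teardown mirrors the setup paths above: untranslatable segment
 * cookies (29-bit P1/P2/P4 aliases) need no work, early fixmap and PMB
 * mappings are handed back to their owners, and anything left is assumed
 * to be a page table mapping released through the vmalloc machinery.
 */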
void iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	/*
	 * Nothing to do if there is no translatable mapping.
	 */
	if (iomapping_nontranslatable(vaddr))
		return;

	/*
	 * There's no VMA if it's from an early fixed mapping.
	 */
	if (iounmap_fixed(addr) == 0)
		return;

	/*
	 * If the PMB handled it, there's nothing else to do.
	 */
	if (pmb_unmap(addr) == 0)
		return;

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(iounmap);