/*
 *  linux/arch/m32r/mm/ioremap.c
 *
 *  Copyright (c) 2001, 2002  Hiroyuki Kondo
 *
 *  Taken from mips version.
 *    (C) Copyright 1995 1996 Linus Torvalds
 *    (C) Copyright 2001 Ralf Baechle
 */

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <asm/pgalloc.h>

/*
 * Generic mapping function, normally called through the ioremap()
 * wrapper in <asm/io.h>:
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access addresses
 * that lie outside the directly mapped segments.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

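/*
 * True iff the address has no bits set above bit 28, i.e. it lies in
 * the low 512MB that the KSEG segments map directly.
 */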
#define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL))

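/**
 * __ioremap - remap physical memory into the kernel address space
 * @phys_addr:  physical address of the region to map
 * @size:       size of the region in bytes
 * @flags:      extra _PAGE_* bits to OR into the page protection
 *
 * Returns a kernel virtual address through which the region can be
 * accessed, or NULL on failure.  Balance with iounmap().
 */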
void __iomem *
__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void __iomem *addr;
        struct vm_struct *area;
        unsigned long offset, last_addr;
        pgprot_t pgprot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Map objects in the low 512MB of the address space using the
         * directly mapped KSEG1 segment (no page tables needed);
         * otherwise map using page tables.
         */
        if (IS_LOW512(phys_addr) && IS_LOW512(phys_addr + size - 1))
                return (void __iomem *) KSEG1ADDR(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using:
         * every page in the range must be marked reserved, or it is
         * ordinary memory that the kernel may already be using.
         */
        if (phys_addr < virt_to_phys(high_memory)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;
        }

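        /*
         * Base protection: global, present, readable and writable;
         * the caller's flags (extra _PAGE_* bits) are ORed in on top.
         */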
        pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
                          | _PAGE_WRITE | flags);

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = (void __iomem *) area->addr;
        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
                               phys_addr, pgprot)) {
                vunmap((void __force *) addr);
                return NULL;
        }

        return (void __iomem *) (offset + (char __iomem *)addr);
}
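
/*
 * Typical use, as a sketch only (0x0f000000 stands in for a real
 * device address):
 *
 *      void __iomem *regs = ioremap(0x0f000000, 0x100);
 *
 *      if (regs) {
 *              u8 status = readb(regs + 0x04);
 *              ...
 *              iounmap(regs);
 *      }
 */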

#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1)

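/**
 * iounmap - undo a mapping set up by __ioremap()
 * @addr:   cookie returned by __ioremap()
 *
 * KSEG1 addresses were never entered into the page tables, so only
 * mappings that live in vmalloc space need to be torn down here.
 */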
void iounmap(volatile void __iomem *addr)
{
        if (!IS_KSEG1(addr))
                vfree((void *) (PAGE_MASK & (unsigned long) addr));
}