/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
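/*
 * Example of the usage rule above (a minimal sketch; DEV_PHYS_BASE and
 * the register offsets are hypothetical, not part of this file): a
 * driver maps its registers, then touches them only through the MMIO
 * accessors on the cookie it got back:
 *
 *	u32 status;
 *	void __iomem *regs;
 *
 *	regs = ioremap(DEV_PHYS_BASE, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);	(hypothetical control register)
 *	status = readl(regs + 0x14);	(hypothetical status register)
 *	iounmap(regs);
 *
 * Dereferencing the returned pointer directly is not portable; the
 * readl/writel/memcpy_toio family must be used instead.
 */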
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

/* Boot-time static mappings, kept sorted by virtual address. */
LIST_HEAD(static_vmlist);

/*
 * Find a static mapping whose memory type matches mtype and whose
 * physical range wholly contains [paddr, paddr + size).
 */
static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
                        size_t size, unsigned int mtype)
{
        struct static_vm *svm;
        struct vm_struct *vm;

        list_for_each_entry(svm, &static_vmlist, list) {
                vm = &svm->vm;
                if (!(vm->flags & VM_ARM_STATIC_MAPPING))
                        continue;
                if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
                        continue;

                if (vm->phys_addr > paddr ||
                        paddr + size - 1 > vm->phys_addr + vm->size - 1)
                        continue;

                return svm;
        }

        return NULL;
}

/*
 * Find the static mapping, if any, that covers the virtual address
 * vaddr.
 */
struct static_vm *find_static_vm_vaddr(void *vaddr)
{
        struct static_vm *svm;
        struct vm_struct *vm;

        list_for_each_entry(svm, &static_vmlist, list) {
                vm = &svm->vm;

                /* static_vmlist is in ascending address order */
                if (vm->addr > vaddr)
                        break;

                if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
                        return svm;
        }

        return NULL;
}

/*
 * Register a static mapping at early boot, keeping static_vmlist
 * sorted by virtual address.
 */
void __init add_static_vm_early(struct static_vm *svm)
{
        struct static_vm *curr_svm;
        struct vm_struct *vm;
        void *vaddr;

        vm = &svm->vm;
        vm_area_add_early(vm);
        vaddr = vm->addr;

        list_for_each_entry(curr_svm, &static_vmlist, list) {
                vm = &curr_svm->vm;

                if (vm->addr > vaddr)
                        break;
        }
        list_add_tail(&svm->list, &curr_svm->list);
}
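/*
 * Sketch of a caller (modelled on iotable_init() in mm/mmu.c; the map
 * descriptor md and the allocator are illustrative, not part of this
 * file): the boot code creates the page tables itself, then records
 * the region here so ioremap() can reuse it later:
 *
 *	struct static_vm *svm = early_alloc(sizeof(*svm));
 *
 *	svm->vm.addr = (void *)(md->virtual & PAGE_MASK);
 *	svm->vm.size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 *	svm->vm.phys_addr = __pfn_to_phys(md->pfn);
 *	svm->vm.flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING |
 *			VM_ARM_MTYPE(md->type);
 *	add_static_vm_early(svm);
 */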

/*
 * Map a single page of I/O memory at the given virtual address, using
 * the page protection of the supplied memory type.
 */
int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
{
        return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
                                  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

/*
 * Bring a task's view of the kernel's vmalloc area up to date by
 * copying the relevant pgd entries from init_mm, retrying until the
 * sequence number is stable in case init_mm changes under us.
 */
void __check_vmalloc_seq(struct mm_struct *mm)
{
        unsigned int seq;

        do {
                seq = init_mm.context.vmalloc_seq;
                memcpy(pgd_offset(mm, VMALLOC_START),
                       pgd_offset_k(VMALLOC_START),
                       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                        pgd_index(VMALLOC_START)));
                mm->context.vmalloc_seq = seq;
        } while (seq != init_mm.context.vmalloc_seq);
}
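/*
 * Callers compare the sequence numbers first so the copy only happens
 * when the kernel mapping has actually changed.  A sketch of the
 * pattern (modelled on check_and_switch_context() in
 * asm/mmu_context.h):
 *
 *	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
 *		__check_vmalloc_seq(mm);
 */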

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmdp;

        flush_cache_vunmap(addr, end);
        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmdp = pmd_offset(pud, addr);
        do {
                pmd_t pmd = *pmdp;

                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
                         * increment the vmalloc sequence so others
                         * notice this change.
                         *
                         * Note: this is still racy on SMP machines.
                         */
                        pmd_clear(pmdp);
                        init_mm.context.vmalloc_seq++;

                        /*
                         * Free the page table, if there was one.
                         */
                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
                }

                addr += PMD_SIZE;
                pmdp += 2;
        } while (addr < end);

        /*
         * Ensure that the active_mm is up to date - we want to
         * catch any use-after-iounmap cases.
         */
        if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
                __check_vmalloc_seq(current->active_mm);

        flush_tlb_kernel_range(virt, end);
}

/*
 * On classic (non-LPAE) ARM a Linux PMD covers 2MB, which the hardware
 * sees as a pair of 1MB section entries; each loop iteration below
 * therefore fills in two section descriptors.
 */
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PMD_SIZE;
                pmd += 2;
        } while (addr < end);

        return 0;
}

/*
 * A 16MB supersection must be replicated across 16 identical 1MB
 * first-level entries; the inner loop below writes them as 8 PMD
 * pairs before advancing to the next supersection.
 */
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(virt);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                unsigned long super_pmd_val, i;

                super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                                PMD_SECT_SUPER;
                super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

                for (i = 0; i < 8; i++) {
                        pmd[0] = __pmd(super_pmd_val);
                        pmd[1] = __pmd(super_pmd_val);
                        flush_pmd_entry(pmd);

                        addr += PMD_SIZE;
                        pmd += 2;
                }

                pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
        } while (addr < end);

        return 0;
}
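/*
 * Worked example of the extended-address encoding above (the pfn is
 * illustrative): with 4K pages, pfn 0x123456 is physical address
 * 0x123456000, i.e. PA[35:32] = 0x1.  __pfn_to_phys() supplies
 * PA[31:20] in the descriptor's top bits, while
 *
 *	(pfn >> (32 - PAGE_SHIFT)) & 0xf == (0x123456 >> 20) & 0xf == 0x1
 *
 * shifted left by 20 places PA[35:32] into descriptor bits [23:20],
 * as the ARM supersection format requires.
 */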
#endif

/*
 * Core remap worker: reuse a matching static mapping when possible,
 * otherwise allocate a vmalloc-area chunk and map it with
 * supersections, sections or individual pages as alignment permits.
 */
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;
        phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
                return NULL;
#endif

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

        /*
         * Try to reuse one of the static mappings whenever possible.
         */
        if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
                struct static_vm *svm;

                svm = find_static_vm_paddr(paddr, size, mtype);
                if (svm) {
                        addr = (unsigned long)svm->vm.addr;
                        addr += paddr - svm->vm.phys_addr;
                        return (void __iomem *) (offset + addr);
                }
        }

        /*
         * Don't allow RAM to be mapped - this causes problems with ARMv6+
         */
        if (WARN_ON(pfn_valid(pfn)))
                return NULL;

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;
        area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
               cpu_is_xsc3()) && pfn >= 0x100000 &&
               !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, type);
        } else if (!((paddr | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
#endif
                err = ioremap_page_range(addr, addr + size, paddr,
                                         __pgprot(type->prot_pte));

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
        unsigned int mtype, void *caller)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
{
        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
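/*
 * Sketch of a caller (the pfn, offset and size are hypothetical):
 * mapping a device by pfn plus in-page offset:
 *
 *	void __iomem *base;
 *
 *	base = __arm_ioremap_pfn(0x40000, 0x800, SZ_4K, MT_DEVICE);
 *	if (!base)
 *		return -ENOMEM;
 *
 * The returned cookie already includes the 0x800 offset, so register
 * accesses start directly at base.
 */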

/*
 * Platforms may override this hook to interpose on all ioremap calls.
 */
void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
                                      unsigned int, void *) =
        __arm_ioremap_caller;

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
        return arch_ioremap_caller(phys_addr, size, mtype,
                __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
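/*
 * Sketch of a direct caller (FB_PHYS_BASE is hypothetical): most code
 * goes through ioremap()/ioremap_wc(), which on ARM resolve to
 * __arm_ioremap() with MT_DEVICE/MT_DEVICE_WC, but the memory type can
 * also be chosen explicitly, e.g. write-combining for a framebuffer:
 *
 *	void __iomem *fb = __arm_ioremap(FB_PHYS_BASE, SZ_1M,
 *					 MT_DEVICE_WC);
 */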

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory, for example when reprogramming source
 * clocks that would affect normal memory. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
        unsigned int mtype;

        if (cached)
                mtype = MT_MEMORY;
        else
                mtype = MT_MEMORY_NONCACHED;

        return __arm_ioremap_caller(phys_addr, size, mtype,
                        __builtin_return_address(0));
}
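/*
 * Sketch of the usual pattern (SRAM_PHYS and sram_reprogram_clock are
 * hypothetical): map on-chip SRAM executable, then copy the routine
 * into it with fncpy() from asm/fncpy.h before calling it:
 *
 *	extern void sram_reprogram_clock(u32 rate);
 *	void (*fn)(u32);
 *	void __iomem *sram;
 *
 *	sram = __arm_ioremap_exec(SRAM_PHYS, SZ_4K, false);
 *	fn = fncpy(sram, &sram_reprogram_clock, SZ_4K);
 *	fn(19200000);
 *
 * A plain memcpy() is not enough here; fncpy() preserves the Thumb
 * bit and flushes the caches for the copied code.
 */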

void __iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
        struct static_vm *svm;

        /* If this is a static mapping, we must leave it alone */
        svm = find_static_vm_vaddr(addr);
        if (svm)
                return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        {
                struct vm_struct *vm;

                vm = find_vm_area(addr);

                /*
                 * If this is a section based mapping we need to handle it
                 * specially as the VM subsystem does not know how to handle
                 * such a beast.
                 */
                if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
                        unmap_area_sections((unsigned long)vm->addr, vm->size);
        }
#endif

        vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void __arm_iounmap(volatile void __iomem *io_addr)
{
        arch_iounmap(io_addr);
}
EXPORT_SYMBOL(__arm_iounmap);

#ifdef CONFIG_PCI
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
        BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);

        return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
                                  PCI_IO_VIRT_BASE + offset + SZ_64K,
                                  phys_addr,
                                  __pgprot(get_mem_type(MT_DEVICE)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
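/*
 * Sketch of a host-bridge caller (pcie and its resource are
 * hypothetical): each bridge maps its 64K I/O window at a fixed slot
 * in the PCI I/O virtual area:
 *
 *	struct resource *io = &pcie->io_res;
 *	int err;
 *
 *	err = pci_ioremap_io(nr * SZ_64K, io->start);
 *	if (err)
 *		return err;
 */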
#endif