/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
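
/*
 * A minimal usage sketch (illustrative only; the base address and
 * register offset below are made up for the example): map a device
 * window, then access it exclusively through the MMIO accessors.
 *
 *	void __iomem *regs = ioremap(0x10000000, SZ_4K);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x04);
 *	val = readl(regs + 0x04);
 *	iounmap(regs);
 */
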
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"


LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
                        size_t size, unsigned int mtype)
{
        struct static_vm *svm;
        struct vm_struct *vm;

        list_for_each_entry(svm, &static_vmlist, list) {
                vm = &svm->vm;
                if (!(vm->flags & VM_ARM_STATIC_MAPPING))
                        continue;
                if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
                        continue;

                if (vm->phys_addr > paddr ||
                        paddr + size - 1 > vm->phys_addr + vm->size - 1)
                        continue;

                return svm;
        }

        return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
        struct static_vm *svm;
        struct vm_struct *vm;

        list_for_each_entry(svm, &static_vmlist, list) {
                vm = &svm->vm;

                /* static_vmlist is sorted in ascending address order */
                if (vm->addr > vaddr)
                        break;

                if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
                        return svm;
        }

        return NULL;
}

void __init add_static_vm_early(struct static_vm *svm)
{
        struct static_vm *curr_svm;
        struct vm_struct *vm;
        void *vaddr;

        vm = &svm->vm;
        vm_area_add_early(vm);
        vaddr = vm->addr;

        list_for_each_entry(curr_svm, &static_vmlist, list) {
                vm = &curr_svm->vm;

                if (vm->addr > vaddr)
                        break;
        }
        list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
{
        return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
                                  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

void __check_vmalloc_seq(struct mm_struct *mm)
{
        unsigned int seq;

        do {
                seq = init_mm.context.vmalloc_seq;
                memcpy(pgd_offset(mm, VMALLOC_START),
                       pgd_offset_k(VMALLOC_START),
                       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                        pgd_index(VMALLOC_START)));
                mm->context.vmalloc_seq = seq;
        } while (seq != init_mm.context.vmalloc_seq);
}
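
/*
 * A sketch of the usual call site (see check_and_switch_context() in
 * <asm/mmu_context.h>): the sequence counter is compared first so the
 * PGD copy only happens when init_mm's vmalloc entries have changed.
 *
 *	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
 *		__check_vmalloc_seq(mm);
 */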

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, if (e.g.) an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmdp;

        flush_cache_vunmap(addr, end);
        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmdp = pmd_offset(pud, addr);
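        /*
         * On 2-level ARM page tables a Linux pmd covers 2MB and holds a
         * pair of 1MB hardware section entries, which is why this loop
         * and the ones in remap_area_sections() and
         * remap_area_supersections() below step the pmd pointer by two.
         */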
        do {
                pmd_t pmd = *pmdp;

                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
                         * increment the vmalloc sequence so others
                         * notice this change.
                         *
                         * Note: this is still racy on SMP machines.
                         */
                        pmd_clear(pmdp);
                        init_mm.context.vmalloc_seq++;

                        /*
                         * Free the page table, if there was one.
                         */
                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
                }

                addr += PMD_SIZE;
                pmdp += 2;
        } while (addr < end);

        /*
         * Ensure that the active_mm is up to date - we want to
         * catch any use-after-iounmap cases.
         */
        if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
                __check_vmalloc_seq(current->active_mm);

        flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PMD_SIZE;
                pmd += 2;
        } while (addr < end);

        return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(virt);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                unsigned long super_pmd_val, i;

                super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                                PMD_SECT_SUPER;
                super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
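                /*
                 * The line above places bits [35:32] of the physical
                 * address into descriptor bits [23:20], as the ARM
                 * supersection format requires.  E.g. with 4K pages,
                 * phys 0x2_4000_0000 gives pfn 0x240000, and
                 * pfn >> 20 = 0x2 lands in the [23:20] field.
                 */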

                for (i = 0; i < 8; i++) {
                        pmd[0] = __pmd(super_pmd_val);
                        pmd[1] = __pmd(super_pmd_val);
                        flush_pmd_entry(pmd);

                        addr += PMD_SIZE;
                        pmd += 2;
                }

                pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
        } while (addr < end);

        return 0;
}
#endif

static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;
        phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
                return NULL;
#endif

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

        /*
         * Try to reuse one of the static mappings whenever possible.
         */
        if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
                struct static_vm *svm;

                svm = find_static_vm_paddr(paddr, size, mtype);
                if (svm) {
                        addr = (unsigned long)svm->vm.addr;
                        addr += paddr - svm->vm.phys_addr;
                        return (void __iomem *) (offset + addr);
                }
        }

        /*
         * Don't allow RAM to be mapped with mismatched attributes - this
         * causes problems with ARMv6+
         */
        if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
                return NULL;

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;
        area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
               cpu_is_xsc3()) && pfn >= 0x100000 &&
               !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, type);
        } else if (!((paddr | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
#endif
                err = ioremap_page_range(addr, addr + size, paddr,
                                         __pgprot(type->prot_pte));

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
        unsigned int mtype, void *caller)
{
        phys_addr_t last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
{
        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
                                      unsigned int, void *) =
        __arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
        __alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed, for example, when
 * reprogramming source clocks in a way that would disturb normal
 * memory. See CONFIG_GENERIC_ALLOCATOR for allocating such external
 * memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
        unsigned int mtype;

        if (cached)
                mtype = MT_MEMORY_RWX;
        else
                mtype = MT_MEMORY_RWX_NONCACHED;

        return __arm_ioremap_caller(phys_addr, size, mtype,
                        __builtin_return_address(0));
}
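
/*
 * A minimal usage sketch, assuming an on-chip SRAM at a made-up address
 * SRAM_PHYS and a routine my_routine to run from it: copy the routine
 * into the executable mapping with fncpy() and call it from there.
 *
 *	void __iomem *sram = __arm_ioremap_exec(SRAM_PHYS, SZ_4K, false);
 *	void (*fn)(void);
 *
 *	if (sram) {
 *		fn = fncpy(sram, &my_routine, SZ_4K);
 *		fn();
 *	}
 */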

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
        return (__force void *)arch_ioremap_caller(phys_addr, size,
                                                   MT_MEMORY_RW,
                                                   __builtin_return_address(0));
}

void __iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
        struct static_vm *svm;

        /* If this is a static mapping, we must leave it alone */
        svm = find_static_vm_vaddr(addr);
        if (svm)
                return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        {
                struct vm_struct *vm;

                vm = find_vm_area(addr);

                /*
                 * If this is a section based mapping we need to handle it
                 * specially as the VM subsystem does not know how to handle
                 * such a beast.
                 */
                if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
                        unmap_area_sections((unsigned long)vm->addr, vm->size);
        }
#endif

        vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
        arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
        pci_ioremap_mem_type = mem_type;
}

int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
        BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

        return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
                                  PCI_IO_VIRT_BASE + offset + SZ_64K,
                                  phys_addr,
                                  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
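
/*
 * A sketch of a typical caller (the window index n and the physical
 * base pio_phys_base are illustrative): a PCI host bridge driver maps
 * its n-th 64K I/O window at a fixed offset into PCI_IO_VIRT_BASE.
 *
 *	ret = pci_ioremap_io(n * SZ_64K, pio_phys_base);
 */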

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
        early_ioremap_setup();
}