linux/arch/arm/mm/ioremap.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
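/*
 * Illustrative usage only (not part of the original file): a driver maps a
 * device's register window with ioremap() and accesses it with the MMIO
 * accessors mentioned above; "res" and CTRL_OFFSET are hypothetical names.
 *
 *        void __iomem *regs = ioremap(res->start, resource_size(res));
 *        if (!regs)
 *                return -ENOMEM;
 *        writel(0x1, regs + CTRL_OFFSET);
 *        ...
 *        iounmap(regs);
 */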
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

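/*
 * List of static (boot-time) I/O mappings.  Entries are added early via
 * add_static_vm_early() (typically from the machine's I/O table) and are
 * kept sorted by ascending virtual address so the lookups below can
 * terminate early.
 */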
LIST_HEAD(static_vmlist);

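/*
 * Look for an existing static mapping of the requested memory type that
 * already covers the physical range [paddr, paddr + size).
 */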
static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
        struct static_vm *svm;
        struct vm_struct *vm;

        list_for_each_entry(svm, &static_vmlist, list) {
                vm = &svm->vm;
                if (!(vm->flags & VM_ARM_STATIC_MAPPING))
                        continue;
                if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
                        continue;

                if (vm->phys_addr > paddr ||
                        paddr + size - 1 > vm->phys_addr + vm->size - 1)
                        continue;

                return svm;
        }

        return NULL;
}

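/*
 * Look for the static mapping, if any, that contains the virtual address
 * vaddr.
 */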
struct static_vm *find_static_vm_vaddr(void *vaddr)
{
        struct static_vm *svm;
        struct vm_struct *vm;

        list_for_each_entry(svm, &static_vmlist, list) {
                vm = &svm->vm;

                /* static_vmlist is in ascending address order */
                if (vm->addr > vaddr)
                        break;

                if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
                        return svm;
        }

        return NULL;
}

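/*
 * Register a static mapping early in boot: add its vm_struct to the early
 * vmalloc area list and insert the entry into static_vmlist, preserving the
 * ascending virtual-address order relied upon above.
 */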
void __init add_static_vm_early(struct static_vm *svm)
{
        struct static_vm *curr_svm;
        struct vm_struct *vm;
        void *vaddr;

        vm = &svm->vm;
        vm_area_add_early(vm);
        vaddr = vm->addr;

        list_for_each_entry(curr_svm, &static_vmlist, list) {
                vm = &curr_svm->vm;

                if (vm->addr > vaddr)
                        break;
        }
        list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
{
        return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
                                  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

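/*
 * Copy the kernel's first-level page table entries for the vmalloc/ioremap
 * region from init_mm into the given mm, retrying until vmalloc_seq is seen
 * to be stable.  This propagates the section-mapping teardowns done below
 * into other address spaces.
 */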
void __check_vmalloc_seq(struct mm_struct *mm)
{
        unsigned int seq;

        do {
                seq = init_mm.context.vmalloc_seq;
                memcpy(pgd_offset(mm, VMALLOC_START),
                       pgd_offset_k(VMALLOC_START),
                       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                        pgd_index(VMALLOC_START)));
                mm->context.vmalloc_seq = seq;
        } while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmdp;

        flush_cache_vunmap(addr, end);
        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmdp = pmd_offset(pud, addr);
        do {
                pmd_t pmd = *pmdp;

                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
                         * increment the vmalloc sequence so others
                         * notice this change.
                         *
                         * Note: this is still racy on SMP machines.
                         */
                        pmd_clear(pmdp);
                        init_mm.context.vmalloc_seq++;

                        /*
                         * Free the page table, if there was one.
                         */
                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
                }

                addr += PMD_SIZE;
                pmdp += 2;
        } while (addr < end);

        /*
         * Ensure that the active_mm is up to date - we want to
         * catch any use-after-iounmap cases.
         */
        if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
                __check_vmalloc_seq(current->active_mm);

        flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PMD_SIZE;
                pmd += 2;
        } while (addr < end);

        return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(virt);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                unsigned long super_pmd_val, i;

                super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                                PMD_SECT_SUPER;
                super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

                for (i = 0; i < 8; i++) {
                        pmd[0] = __pmd(super_pmd_val);
                        pmd[1] = __pmd(super_pmd_val);
                        flush_pmd_entry(pmd);

                        addr += PMD_SIZE;
                        pmd += 2;
                }

                pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
        } while (addr < end);

        return 0;
}
#endif

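/*
 * Core remapping path: reuse a matching static mapping when one exists,
 * otherwise allocate a vmalloc area and map it using supersections, sections
 * or individual pages depending on alignment and configuration.
 */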
static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;
        phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
                return NULL;
#endif

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

        /*
         * Try to reuse one of the static mappings whenever possible.
         */
        if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
                struct static_vm *svm;

                svm = find_static_vm_paddr(paddr, size, mtype);
                if (svm) {
                        addr = (unsigned long)svm->vm.addr;
                        addr += paddr - svm->vm.phys_addr;
                        return (void __iomem *) (offset + addr);
                }
        }

        /*
         * Don't allow RAM to be mapped with mismatched attributes - this
         * causes problems with ARMv6+
         */
        if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
                return NULL;

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;
        area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
               cpu_is_xsc3()) && pfn >= 0x100000 &&
               !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, type);
        } else if (!((paddr | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
#endif
                err = ioremap_page_range(addr, addr + size, paddr,
                                         __pgprot(type->prot_pte));

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
        unsigned int mtype, void *caller)
{
        phys_addr_t last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
{
        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
                                      unsigned int, void *) =
        __arm_ioremap_caller;

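/*
 * Summary note (not from the original file): the ioremap() variants below
 * differ only in the memory type passed to the mapping code.  MT_DEVICE is
 * the default device mapping, MT_DEVICE_CACHED requests a cacheable mapping
 * and MT_DEVICE_WC requests a non-cacheable, bufferable ("write-combining")
 * mapping; the precise page-table attributes for each type are defined in
 * the mem_types[] table in arch/arm/mm/mmu.c.
 */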
void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
        __alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed, for example, for reprogramming
 * source clocks that would affect normal memory. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
        unsigned int mtype;

        if (cached)
                mtype = MT_MEMORY_RWX;
        else
                mtype = MT_MEMORY_RWX_NONCACHED;

        return __arm_ioremap_caller(phys_addr, size, mtype,
                        __builtin_return_address(0));
}
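/*
 * Illustrative only (not from this file): callers commonly pair
 * __arm_ioremap_exec() with fncpy() from <asm/fncpy.h> to copy a routine
 * into the mapped area and obtain a callable pointer to it, e.g.
 *
 *        void *sram = __arm_ioremap_exec(sram_phys, size, false);
 *        suspend_fn = fncpy(sram, &orig_suspend_fn, size);
 *
 * where sram_phys, size, suspend_fn and orig_suspend_fn are hypothetical.
 */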
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
        return (__force void *)arch_ioremap_caller(phys_addr, size,
                                                   MT_MEMORY_RW,
                                                   __builtin_return_address(0));
}

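/*
 * Tear down a mapping created by the ioremap family.  Static mappings are
 * left untouched; section mappings (non-SMP, non-LPAE only) need the extra
 * handling below because the generic vmalloc code does not understand them.
 */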
void __iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
        struct static_vm *svm;

        /* If this is a static mapping, we must leave it alone */
        svm = find_static_vm_vaddr(addr);
        if (svm)
                return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        {
                struct vm_struct *vm;

                vm = find_vm_area(addr);

                /*
                 * If this is a section based mapping we need to handle it
                 * specially as the VM subsystem does not know how to handle
                 * such a beast.
                 */
                if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
                        unmap_area_sections((unsigned long)vm->addr, vm->size);
        }
#endif

        vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
        arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
        pci_ioremap_mem_type = mem_type;
}

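/*
 * Map a 64K window of PCI I/O space at the given offset into the fixed
 * PCI_IO_VIRT_BASE virtual region, using the memory type selected above.
 */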
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
        BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

        return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
                                  PCI_IO_VIRT_BASE + offset + SZ_64K,
                                  phys_addr,
                                  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
        early_ioremap_setup();
}