linux/arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995, 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long offset, vaddr;
        resource_size_t pfn, last_pfn, last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Check whether the request spans more than a single resource
         * (BAR) in the iomem resource tree.
         */
        WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
                  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        last_pfn = last_addr >> PAGE_SHIFT;
        for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PHYSICAL_PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                if (!is_new_memtype_allowed(phys_addr, size,
                                            prot_val, new_prot_val)) {
                        printk(KERN_ERR
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        goto err_free_memtype;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_IO_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_IO_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_IO_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL_IO;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                goto err_free_memtype;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;

        if (kernel_map_sync_memtype(phys_addr, size, prot_val))
                goto err_free_area;

        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
                goto err_free_area;

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        return ret_addr;
err_free_area:
        free_vm_area(area);
err_free_memtype:
        free_memtype(phys_addr, phys_addr + size);
        return NULL;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
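
/*
 * A minimal usage sketch (hypothetical PCI driver; pdev, MY_CTRL_REG and
 * the choice of BAR 0 are illustrative, not part of this file):
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(1, regs + MY_CTRL_REG);
 *      ...
 *      iounmap(regs);
 */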

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
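
/*
 * Write combining is typically used for frame-buffer style apertures where
 * streaming write bandwidth matters.  A sketch (fb_phys and fb_len are
 * illustrative):
 *
 *      void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 * Note the fallback above: without PAT support this degrades to the
 * uncached mapping of ioremap_nocache().
 */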

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

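/**
 * ioremap_prot - map bus memory with caller-specified protection bits
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 * @prot_val:   page protection flags; only the _PAGE_CACHE_MASK bits are
 *              honoured here
 *
 * Must be freed with iounmap.
 */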
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

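/*
 * Undo xlate_dev_mem_ptr(): RAM pages were handed out via __va() and need
 * no unmapping; anything else was mapped with ioremap_cache() above and
 * must be unmapped again.
 */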
void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

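/* True iff @ptep points into the boot-time page table (bm_pte) above. */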
bool __init is_early_ioremap_ptep(pte_t *ptep)
{
        return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

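/*
 * Install bm_pte as the page table behind the FIX_BTMAP fixmap range and
 * precompute the virtual base address of each boot-time mapping slot.
 */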
void __init early_ioremap_init(void)
{
        pmd_t *pmd;
        int i;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_reset(void)
{
        after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           phys_addr_t phys, pgprot_t prot)
{
        if (after_paging_init)
                __set_fixmap(idx, phys, prot);
        else
                __early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

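/*
 * Book-keeping for the boot-time mapping slots: prev_map[] holds the
 * virtual address handed out for each slot (NULL when the slot is free),
 * prev_size[] the size that slot was mapped with.
 */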
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

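/*
 * Rebuild the early ioremap page table, e.g. after the fixmap area has
 * been relocated.  No boot-time mapping may still be live at this point;
 * warn if one is.
 */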
void __init fixup_early_ioremap(void)
{
        int i;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (prev_map[i]) {
                        WARN_ON(1);
                        break;
                }
        }

        early_ioremap_init();
}

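/*
 * Late sanity check: by the time initcalls run, every early_ioremap()
 * should long since have been matched by an early_iounmap().
 */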
static int __init check_early_ioremap_leak(void)
{
        int count = 0;
        int i;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                if (prev_map[i])
                        count++;

        if (!count)
                return 0;
        WARN(1, KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
                count);
        printk(KERN_WARNING
                "please boot with early_ioremap_debug and report the dmesg.\n");

        return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
        unsigned long offset;
        resource_size_t last_addr;
        unsigned int nrpages;
        enum fixed_addresses idx0, idx;
        int i, slot;

        WARN_ON(system_state != SYSTEM_BOOTING);

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (!prev_map[i]) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_ioremap(%08llx, %08lx): no free slot\n",
                         (u64)phys_addr, size);
                WARN_ON(1);
                return NULL;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
                       (u64)phys_addr, size, slot);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        prev_size[slot] = size;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr, prot);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

        prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
        return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}

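/*
 * Boot-time usage sketch (table_phys and table_len are illustrative names;
 * the DMI and ACPI table scanners are examples of real early callers):
 *
 *      void __iomem *p = early_memremap(table_phys, table_len);
 *
 *      if (p) {
 *              ... parse the table through p ...
 *              early_iounmap(p, table_len);
 *      }
 *
 * At most FIX_BTMAPS_SLOTS mappings may be live at once, each covering up
 * to NR_FIX_BTMAPS pages, and each must be released with early_iounmap()
 * before its slot can be reused.
 */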
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int i, slot;

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (prev_map[i] == addr) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_iounmap(%p, %08lx): no matching slot\n",
                         addr, size);
                WARN_ON(1);
                return;
        }

        if (prev_size[slot] != size) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: inconsistent size, mapped with %08lx\n",
                         addr, size, slot, prev_size[slot]);
                WARN_ON(1);
                return;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, slot);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
        prev_map[slot] = NULL;
}