linux/arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to using smaller pages, down to
 * 4KB, when a mapping range is covered by non-WB type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
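/*
 * Worked example (hypothetical address): an ioremap of phys 0xfed00004
 * with size 0x10 maps the whole page at 0xfed00000 and returns the new
 * virtual address plus the 0x4 offset, so the caller never sees the
 * page-alignment fixup done below.
 */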
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	pfn      = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * buses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
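
/*
 * Illustrative sketch, not part of this file's interface: a typical
 * driver pairs ioremap_nocache() with iounmap().  The PCI BAR and
 * register offset below are hypothetical.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);		// hypothetical control register
 *	...
 *	iounmap(regs);
 */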

/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC.  This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many buses. In particular, driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
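
/*
 * Illustrative sketch (hypothetical fbdev driver): write-combined
 * mappings suit framebuffer-style apertures dominated by streaming
 * writes.
 *
 *	info->screen_base = ioremap_wc(info->fix.smem_start,
 *				       info->fix.smem_len);
 *	if (!info->screen_base)
 *		return -ENOMEM;
 *	...
 *	iounmap(info->screen_base);
 */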

/**
 * ioremap_wt	-	map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);
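
/*
 * Illustrative sketch (hypothetical battery-backed SRAM resource):
 * write-through lets the CPU cache reads while every write still goes
 * straight to the device.
 *
 *	void __iomem *sram;
 *
 *	sram = ioremap_wt(res->start, resource_size(res));
 *	if (!sram)
 *		return -ENOMEM;
 *	...
 *	iounmap(sram);
 */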

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = ioremap_cache(start, PAGE_SIZE);
	/* Only add the offset on success and return NULL if the ioremap() failed: */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
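
/*
 * Illustrative sketch of how a /dev/mem style reader is expected to pair
 * the two helpers above (simplified; "p", "buf" and "sz" are hypothetical):
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		ret = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */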

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
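
/*
 * Illustrative sketch: the fixmap slots initialised above back
 * early_ioremap()/early_iounmap(), which boot code uses before the
 * regular ioremap machinery is available (table_phys/table_len are
 * hypothetical):
 *
 *	void __iomem *map = early_ioremap(table_phys, table_len);
 *	if (map) {
 *		...parse the firmware table...
 *		early_iounmap(map, table_len);
 *	}
 */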