linux/arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
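
/*
 * Illustrative sketch (not part of the original file): why the attribute
 * fixup above matters. A RAM-backed page is normally reachable through the
 * write-back linear mapping via __va(); if the same page is also mapped
 * uncached somewhere else, the CPU would see conflicting cache attributes
 * for one physical page. Callers such as kernel_map_sync_memtype() thus
 * do, in effect (values hypothetical):
 *
 *	unsigned long vaddr = (unsigned long)__va(phys_addr);
 *
 *	if (ioremap_change_attr(vaddr, size, _PAGE_CACHE_UC))
 *		goto out_free;	// couldn't de-alias the direct mapping
 */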

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped.
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	last_pfn = last_addr >> PAGE_SHIFT;
	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it.
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
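
/*
 * Illustrative sketch (not part of the original file) of the
 * non-page-aligned case handled above, with made-up numbers: a request
 * for 8 bytes at physical 0xfed00004 becomes a one-page mapping of
 * 0xfed00000, and the returned pointer carries the sub-page offset:
 *
 *	offset    = 0xfed00004 & ~PAGE_MASK;	// 0x4
 *	phys_addr = 0xfed00000;			// page-aligned base
 *	size      = PAGE_SIZE;			// covers offsets 0x4..0xb
 *	ret_addr  = vaddr + offset;		// what the caller sees
 *
 * iounmap() later masks the offset back off before looking up the
 * vm_struct, so callers never need to track the alignment themselves.
 */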

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses; in particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
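
/*
 * Illustrative sketch (not part of the original file): the expected
 * pairing of ioremap_nocache() with the mmio accessors and iounmap().
 * The base address and register offset are hypothetical.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(0xfed00000, PAGE_SIZE);	// hypothetical BAR
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);	// use accessors, never deref the pointer
 *	iounmap(regs);
 */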

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
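
/*
 * Illustrative sketch (not part of the original file): write-combining is
 * the usual choice for framebuffer-style memory, where bursts of
 * sequential stores matter more than strict ordering. Addresses and sizes
 * here are hypothetical.
 *
 *	void __iomem *fb = ioremap_wc(0xd0000000, 8 * 1024 * 1024);
 *
 *	if (fb) {
 *		memset_io(fb, 0, 8 * 1024 * 1024);	// streaming clear
 *		iounmap(fb);
 *	}
 *
 * Note the fallback above: without PAT, ioremap_wc() silently degrades to
 * the strictly ordered, slower uncached mapping.
 */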

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
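
/*
 * Illustrative sketch (not part of the original file): how a /dev/mem
 * style read path is expected to use the pair above. 'phys', 'buf' and
 * 'count' are hypothetical caller-supplied values.
 *
 *	void *ptr = xlate_dev_mem_ptr(phys);
 *
 *	if (!ptr)
 *		return -EFAULT;
 *	remaining = copy_to_user(buf, ptr, count);
 *	unxlate_dev_mem_ptr(phys, ptr);	// no-op for RAM, iounmap otherwise
 */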

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
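
/*
 * Illustrative sketch (not part of the original file): the bm_pte page
 * wired up above backs early_ioremap(), which boot code uses before the
 * normal ioremap machinery (vmalloc area, memtype tracking) exists.
 * Typical pattern, with a hypothetical firmware table address:
 *
 *	void __iomem *tbl = early_ioremap(0x000f0000, 0x1000);
 *
 *	if (tbl) {
 *		// parse the table ...
 *		early_iounmap(tbl, 0x1000);
 *	}
 *
 * Mappings come from the small FIX_BTMAP_* fixmap window, which is why
 * early_ioremap_init() insists the whole range sits in a single pmd.
 */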

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
 405