linux/arch/powerpc/mm/pgtable_64.c
/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

unsigned long ioremap_bot = IOREMAP_BASE;


#ifdef CONFIG_PPC_MMU_NOHASH
static void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	if (init_bootmem_done)
		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
	else
		pt = __va(memblock_alloc_base(size, size,
					      __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */

/*
 * map_kernel_page is currently only called by __ioremap; it adds an entry
 * to the ioremap page table and adds an entry to the HPT, possibly bolting it.
 */
int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
#ifdef CONFIG_PPC_MMU_NOHASH
		/* Warning! This will blow up if bootmem is not initialized,
		 * which our ppc64 code is keen to do. We'll need to fix it
		 * and/or be more careful.
		 */
		pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* PUD_TABLE_SIZE */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping of IO "
			       "memory at 0x%016lx!\n", pa);
			return -ENOMEM;
		}
#endif /* !CONFIG_PPC_MMU_NOHASH */
	}
	return 0;
}
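
/*
 * Illustrative sketch (not part of the original file, hence #if 0): how a
 * direct call to map_kernel_page() might look when bolting a single kernel
 * page. The virtual and physical addresses below are hypothetical, and
 * cacheable kernel protection bits are assumed via PAGE_KERNEL.
 */
#if 0
static int __init example_bolt_one_page(void)
{
	unsigned long ea = 0xd000000000000000ul;	/* hypothetical VA */
	unsigned long pa = 0x80000000ul;		/* hypothetical PA */

	/* Map one page; returns 0 on success, -ENOMEM on allocation failure */
	return map_kernel_page(ea, pa, pgprot_val(PAGE_KERNEL));
}
#endif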

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & _PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *)ea;
}
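
/*
 * Illustrative sketch (not part of the original file, hence #if 0):
 * __ioremap_at() is the building block for callers that have already
 * reserved a virtual range themselves, e.g. the PCI code mapping ISA I/O
 * space at a fixed address. The physical address, virtual address and size
 * below are hypothetical.
 */
#if 0
static void __iomem *example_map_fixed_range(phys_addr_t phys, void *virt)
{
	/* Map 64K as guarded, non-cacheable I/O; the PAGE_KERNEL base flags
	 * get added by __ioremap_at() since _PAGE_PRESENT is not set here.
	 */
	return __ioremap_at(phys, virt, 0x10000,
			    _PAGE_NO_CACHE | _PAGE_GUARDED);
}
#endif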

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map using addresses going
	 * up from ioremap_bot. vmalloc will use the addresses from
	 * ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
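
/*
 * Illustrative usage sketch (not part of the original file, hence #if 0):
 * the typical driver pattern for ioremap()/iounmap(). The device address,
 * size and register offset are hypothetical.
 */
#if 0
static int example_probe(void)
{
	void __iomem *regs;

	regs = ioremap(0xf0000000ul, PAGE_SIZE);	/* hypothetical MMIO */
	if (!regs)
		return -ENOMEM;

	out_be32(regs + 0x10, 0x1);	/* hypothetical device register */

	iounmap(regs);
	return 0;
}
#endif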

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
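
/*
 * Illustrative sketch (not part of the original file, hence #if 0):
 * ioremap_prot() lets the caller pass explicit PTE flags, e.g. to get a
 * cacheable kernel mapping instead of the default guarded, non-cacheable
 * one. The physical address is hypothetical.
 */
#if 0
static void __iomem *example_map_cacheable(phys_addr_t phys)
{
	/* PAGE_KERNEL gives a cacheable, writable mapping; _PAGE_RW in it
	 * makes ioremap_prot() set _PAGE_DIRTY as well.
	 */
	return ioremap_prot(phys, PAGE_SIZE, pgprot_val(PAGE_KERNEL));
}
#endif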

/*
 * Unmap an IO region and remove it from the vmalloc'd mappings.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);