linux/arch/powerpc/mm/pgtable_64.c
/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

unsigned long ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PPC_MMU_NOHASH
/*
 * Allocate a zeroed page-table page before the slab allocator is up,
 * from bootmem once it is initialized and from LMB before that.
 */
static void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	if (init_bootmem_done)
		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
	else
		pt = __va(lmb_alloc_base(size, size,
					 __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */

/*
 * map_kernel_page is currently only called by __ioremap.  It adds an
 * entry to the ioremap page table and an entry to the HPT, possibly
 * bolting that entry.
 */
int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
	} else {
#ifdef CONFIG_PPC_MMU_NOHASH
		/* Warning! This will blow up if bootmem is not initialized,
		 * and our ppc64 code is keen to call it that early; we'll
		 * need to fix that and/or be more careful here.
		 */
		pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* PUD_TABLE_SIZE */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to create a bolted mapping "
			       "of IO memory at 0x%016lx!\n", pa);
			return -ENOMEM;
		}
#endif /* !CONFIG_PPC_MMU_NOHASH */
	}
	return 0;
}
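
/*
 * A minimal usage sketch (not from the original file): wiring a single
 * uncached, guarded page into the kernel address space, where "ea" and
 * "pa" are hypothetical page-aligned addresses:
 *
 *	if (map_kernel_page(ea, pa, pgprot_val(PAGE_KERNEL) |
 *			    _PAGE_NO_CACHE | _PAGE_GUARDED))
 *		return -ENOMEM;
 */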

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & _PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
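
/*
 * A sketch (not from the original file) of how these two low level
 * helpers pair up for a mapping at a fixed kernel virtual address, as
 * PCI code does for ISA IO space.  The names phys_base and fixed_ea
 * and the 64K size are hypothetical:
 *
 *	void __iomem *va = __ioremap_at(phys_base, fixed_ea, 0x10000,
 *					_PAGE_NO_CACHE | _PAGE_GUARDED);
 *	if (va)
 *		... do IO through va ...
 *	__iounmap_at(fixed_ea, 0x10000);
 */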

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.  Once the vmalloc system is
	 * running, we use it.  Before that, we map using addresses going
	 * up from ioremap_bot; vmalloc will then use the addresses from
	 * ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}
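
/*
 * Worked example of the alignment above (hypothetical numbers, 4K
 * pages): for addr = 0x10000f00 and size = 0x200,
 *
 *	paligned = 0x10000f00 & PAGE_MASK            = 0x10000000
 *	size     = PAGE_ALIGN(0x10001100) - paligned = 0x2000
 *
 * so two pages get mapped, and the returned cookie is the mapped
 * virtual base plus the offset 0xf00 into the first page.
 */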

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
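
/*
 * Typical driver usage, sketched for illustration (the resource "res"
 * and register offset CTRL_REG below are hypothetical):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	out_be32(regs + CTRL_REG, 1);
 *	iounmap(regs);
 */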

void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
			     unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
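
/*
 * A sketch of ioremap_flags() picking a non-default cache attribute,
 * e.g. a write-through mapping for a frame buffer; fb_phys and fb_len
 * are hypothetical:
 *
 *	void __iomem *fb = ioremap_flags(fb_phys, fb_len, _PAGE_WRITETHRU);
 *	if (!fb)
 *		return -ENOMEM;
 */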

/*
 * Unmap an IO region and remove it from the vmalloc'd mappings.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_flags);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);