linux/arch/powerpc/mm/pgtable_64.c
/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>


#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
{
        unsigned long i;

        /* We don't support the 4K PFN hack with ioremap */
        if (pgprot_val(prot) & H_PAGE_4K_PFN)
                return NULL;

        WARN_ON(pa & ~PAGE_MASK);
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        for (i = 0; i < size; i += PAGE_SIZE)
                if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
                        return NULL;

        return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        unmap_kernel_range((unsigned long)ea, size);
}
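
/*
 * Usage sketch (illustrative, not taken from a real caller): PCI host
 * bridge setup code maps its IO window by hand and may later unmap a
 * sub-range of it. The names phb_io_phys and phb_io_virt are
 * hypothetical; real callers get both from the PHB discovery code.
 *
 *	void __iomem *va;
 *
 *	va = __ioremap_at(phb_io_phys, phb_io_virt, 0x10000,
 *			  pgprot_noncached(PAGE_KERNEL));
 *	if (va) {
 *		// ... use the mapping, then tear it down manually ...
 *		__iounmap_at(phb_io_virt, 0x10000);
 *	}
 */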

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
                                pgprot_t prot, void *caller)
{
        phys_addr_t paligned;
        void __iomem *ret;

        /*
         * Choose an address to map it to. Once the vmalloc system is
         * running, we use it. Before that, we map using addresses going
         * up from ioremap_bot. vmalloc will reserve the addresses from
         * ioremap_bot through IOREMAP_END for us.
         */
        paligned = addr & PAGE_MASK;
        size = PAGE_ALIGN(addr + size) - paligned;

        if ((size == 0) || (paligned == 0))
                return NULL;

        if (slab_is_available()) {
                struct vm_struct *area;

                area = __get_vm_area_caller(size, VM_IOREMAP,
                                            ioremap_bot, IOREMAP_END,
                                            caller);
                if (area == NULL)
                        return NULL;

                area->phys_addr = paligned;
                ret = __ioremap_at(paligned, area->addr, size, prot);
                if (!ret)
                        vunmap(area->addr);
        } else {
                ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
                if (ret)
                        ioremap_bot += size;
        }

        if (ret)
                ret += addr & ~PAGE_MASK;
        return ret;
}
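
/*
 * Note that the physical address is page-aligned before mapping and the
 * sub-page offset is added back to the returned cookie, so callers may
 * pass an unaligned address. A minimal sketch (the constants are made
 * up for illustration):
 *
 *	void __iomem *p;
 *
 *	p = ioremap(0x200001004UL, 0x100);
 *	// p points 0x4 bytes into a page-aligned VM_IOREMAP area
 */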

#ifdef CONFIG_ZONE_DEVICE
/*
 * Override the generic version in mm/memremap.c.
 *
 * With hash translation, the direct-map range is mapped with just one
 * page size selected by htab_init_page_sizes(). Consult
 * mmu_psize_defs[] to determine the minimum page size alignment.
 */
unsigned long memremap_compat_align(void)
{
        unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;

        if (radix_enabled())
                return SUBSECTION_SIZE;
        return max(SUBSECTION_SIZE, 1UL << shift);
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif

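/*
 * Worked example (assumed values): with the hash MMU and a 16MB linear
 * page size, mmu_psize_defs[mmu_linear_psize].shift == 24, so
 * memremap_compat_align() returns max(SUBSECTION_SIZE, 1UL << 24),
 * i.e. 16MB. Under radix it is just SUBSECTION_SIZE.
 */
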
void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
                         unsigned long flags)
{
        return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, prot, caller);
        return __ioremap_caller(addr, size, prot, caller);
}

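/*
 * Typical driver-side usage (sketch; "res" is the device's MMIO
 * resource and the register offset is hypothetical):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x10);
 *	// ... use the device, then ...
 *	iounmap(regs);
 */
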
void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, prot, caller);
        return __ioremap_caller(addr, size, prot, caller);
}

void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_cached(PAGE_KERNEL);
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, prot, caller);
        return __ioremap_caller(addr, size, prot, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
                             unsigned long flags)
{
        void *caller = __builtin_return_address(0);

        /* writeable implies dirty for kernel addresses */
        if (flags & _PAGE_WRITE)
                flags |= _PAGE_DIRTY;

        /* we don't want to let _PAGE_EXEC leak out */
        flags &= ~_PAGE_EXEC;
        /*
         * Force kernel mapping.
         */
        flags &= ~_PAGE_USER;
        flags |= _PAGE_PRIVILEGED;

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, __pgprot(flags), caller);
        return __ioremap_caller(addr, size, __pgprot(flags), caller);
}

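/*
 * Sketch of a call through the raw-flags interface; the massaging above
 * means a writable request also gets _PAGE_DIRTY, and user/exec
 * permissions are always stripped ("phys" and "size" are placeholders):
 *
 *	void __iomem *p;
 *
 *	p = ioremap_prot(phys, size,
 *			 pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 */
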
/*
 * Unmap an IO region and remove it from the vmalloc'd mappings.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
        void *addr;

        if (!slab_is_available())
                return;

        addr = (void *) ((unsigned long __force)
                         PCI_FIX_ADDR(token) & PAGE_MASK);
        if ((unsigned long)addr < ioremap_bot) {
                printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
                       " at 0x%p\n", addr);
                return;
        }
        vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
        if (ppc_md.iounmap)
                ppc_md.iounmap(token);
        else
                __iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
        if (pgd_is_leaf(pgd)) {
                VM_WARN_ON(!pgd_huge(pgd));
                return pte_page(pgd_pte(pgd));
        }
        return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
        if (pud_is_leaf(pud)) {
                VM_WARN_ON(!pud_huge(pud));
                return pte_page(pud_pte(pud));
        }
        return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd and use PTE_RPN_SHIFT bits
 * for flags. For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned
 * virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
        if (pmd_is_leaf(pmd)) {
                VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd)));
                return pte_page(pmd_pte(pmd));
        }
        return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
        if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
                pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
                return;
        }

        if (radix_enabled())
                radix__mark_rodata_ro();
        else
                hash__mark_rodata_ro();
}

void mark_initmem_nx(void)
{
        if (radix_enabled())
                radix__mark_initmem_nx();
        else
                hash__mark_initmem_nx();
}
#endif
