linux/arch/powerpc/mm/init_64.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

static void pgd_ctor(void *addr)
{
        memset(addr, 0, PGD_TABLE_SIZE);
}

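/*
 * Editor's note: under CONFIG_TRANSPARENT_HUGEPAGE the PMD table is
 * allocated at twice its nominal size, so the constructor below clears
 * both halves.  The second half is, as far as can be told from this
 * file alone, extra per-PMD tracking space used by the THP code (an
 * assumption; the actual layout is defined elsewhere in the MM headers).
 */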
static void pmd_ctor(void *addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        memset(addr, 0, PMD_TABLE_SIZE * 2);
#else
        memset(addr, 0, PMD_TABLE_SIZE);
#endif
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
        char *name;
        unsigned long table_size = sizeof(void *) << shift;
        unsigned long align = table_size;

        /* When batching pgtable pointers for RCU freeing, we store
         * the index size in the low bits.  Table alignment must be
         * big enough to fit it.
         *
         * Likewise, hugepage pagetable pointers contain a (different)
         * shift value in the low bits.  All tables must be aligned so
         * as to leave enough 0 bits in the address to contain it. */
        unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
                                     HUGEPD_SHIFT_MASK + 1);
        struct kmem_cache *new;

        /* It would be nice if this was a BUILD_BUG_ON(), but at the
         * moment, gcc doesn't seem to recognize is_power_of_2 as a
         * constant expression, so much for that. */
        BUG_ON(!is_power_of_2(minalign));
        BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

        if (PGT_CACHE(shift))
                return; /* Already have a cache of this size */

        align = max_t(unsigned long, align, minalign);
        name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
        new = kmem_cache_create(name, table_size, align, 0, ctor);
        kfree(name);
        pgtable_cache[shift - 1] = new;
        pr_debug("Allocated pgtable cache for order %d\n", shift);
}
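
/*
 * Editor's sketch of how a cache registered above is expected to be
 * consumed (illustrative only; the real callers live in the pgalloc
 * headers and the hugepage code).  PGT_CACHE(shift) is assumed to
 * resolve to pgtable_cache[shift - 1], matching the store in
 * pgtable_cache_add():
 *
 *	pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
 *	...
 *	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 */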


void pgtable_cache_init(void)
{
        pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
        pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
        if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
                panic("Couldn't allocate pgtable caches");
        /* In all current configs, when the PUD index exists it's the
         * same size as either the pgd or pmd index.  Verify that the
         * initialization above has also created a PUD cache.  This
         * will need re-examination if we add new possibilities for
         * the pagetable layout. */
        BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;
        start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(page_to_pfn((struct page *)start)))
                        return 1;

        return 0;
}
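
/*
 * Editor's note, illustrative arithmetic for the loop above (the actual
 * values are configuration dependent and only assumed here): with 64K
 * base pages and 16M sparsemem sections, PAGES_PER_SECTION would be
 * 2^(24 - 16) = 256, so each section contributes 256 * sizeof(struct page)
 * bytes of vmemmap.  A single large vmemmap backing page can therefore
 * span several sections, which is why the check steps section by section.
 */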

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        /* Create a PTE encoding without page size */
        unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
                _PAGE_KERNEL_RW;

        /* PTEs only contain page size encodings up to 32M */
        BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

        /* Encode the size in the PTE */
        flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

        /* For each PTE for that area, map things. Note that we don't
         * increment phys because all PTEs are of the large size and
         * thus must have the low bits clear
         */
        for (i = 0; i < page_size; i += PAGE_SIZE)
                BUG_ON(map_kernel_page(start + i, phys, flags));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
                                   unsigned long page_size)
{
}
#endif
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        int  mapped = htab_bolt_mapping(start, start + page_size, phys,
                                        pgprot_val(PAGE_KERNEL),
                                        mmu_vmemmap_psize,
                                        mmu_kernel_ssize);
        BUG_ON(mapped < 0);
}

#ifdef CONFIG_MEMORY_HOTPLUG
extern int htab_remove_mapping(unsigned long vstart, unsigned long vend,
                        int psize, int ssize);

static void vmemmap_remove_mapping(unsigned long start,
                                   unsigned long page_size)
{
        int mapped = htab_remove_mapping(start, start + page_size,
                                         mmu_vmemmap_psize,
                                         mmu_kernel_ssize);
        BUG_ON(mapped < 0);
}
#endif

#endif /* CONFIG_PPC_BOOK3E */

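/*
 * Bookkeeping for the vmemmap backing blocks: each vmemmap_backing entry
 * records the virtual start and physical address of one backing block so
 * that vmemmap_free() can find and return the memory on hot-remove.
 * Entries are carved out of whole pages obtained from
 * vmemmap_alloc_block() (num_left tracks what is left of the current
 * page), and entries released by vmemmap_list_free() are kept on the
 * same 'next' chain (counted by num_freed) for reuse before another
 * page is allocated.
 */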
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
        struct vmemmap_backing *vmem_back;
        /* get from freed entries first */
        if (num_freed) {
                num_freed--;
                vmem_back = next;
                next = next->list;

                return vmem_back;
        }

        /* allocate a page when required and hand out chunks */
        if (!num_left) {
                next = vmemmap_alloc_block(PAGE_SIZE, node);
                if (unlikely(!next)) {
                        WARN_ON(1);
                        return NULL;
                }
                num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
        }

        num_left--;

        return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
                                            unsigned long start,
                                            int node)
{
        struct vmemmap_backing *vmem_back;

        vmem_back = vmemmap_list_alloc(node);
        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return;
        }

        vmem_back->phys = phys;
        vmem_back->virt_addr = start;
        vmem_back->list = vmemmap_list;

        vmemmap_list = vmem_back;
}

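/*
 * Populate the vmemmap for the range [start, end): walk it in steps of
 * the vmemmap page size, skip chunks that an overlapping section has
 * already backed, and for each remaining chunk allocate a backing block,
 * record it on vmemmap_list and map it.  Returns -ENOMEM if a backing
 * block cannot be allocated.
 */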
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align to the page size of the linear mapping. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

        for (; start < end; start += page_size) {
                void *p;

                if (vmemmap_populated(start, page_size))
                        continue;

                p = vmemmap_alloc_block(page_size, node);
                if (!p)
                        return -ENOMEM;

                vmemmap_list_populate(__pa(p), start, node);

                pr_debug("      * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                vmemmap_create_mapping(start, page_size, __pa(p));
        }

        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
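/*
 * Find the vmemmap_backing entry whose virt_addr matches @start, unlink
 * it from vmemmap_list, push it onto the freed-entry chain for reuse by
 * vmemmap_list_alloc(), and return the physical address it recorded.
 * Returns 0 (with a warning) if no matching entry exists.
 */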
static unsigned long vmemmap_list_free(unsigned long start)
{
        struct vmemmap_backing *vmem_back, *vmem_back_prev;

        vmem_back_prev = vmem_back = vmemmap_list;

        /* look for it with prev pointer recorded */
        for (; vmem_back; vmem_back = vmem_back->list) {
                if (vmem_back->virt_addr == start)
                        break;
                vmem_back_prev = vmem_back;
        }

        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return 0;
        }

        /* remove it from vmemmap_list */
        if (vmem_back == vmemmap_list) /* remove head */
                vmemmap_list = vmem_back->list;
        else
                vmem_back_prev->list = vmem_back->list;

        /* next now points to this freed entry */
        vmem_back->list = next;
        next = vmem_back;
        num_freed++;

        return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_free %lx...%lx\n", start, end);

        for (; start < end; start += page_size) {
                unsigned long addr;

                /*
                 * The section has already been marked invalid, so if
                 * vmemmap_populated() returns true, some other section
                 * still uses this page; skip it.
                 */
                if (vmemmap_populated(start, page_size))
                        continue;

                addr = vmemmap_list_free(start);
                if (addr) {
                        struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

                        if (PageReserved(page)) {
                                /* allocated from bootmem */
                                if (page_size < PAGE_SIZE) {
                                        /*
                                         * this shouldn't happen, but if it is
                                         * the case, leave the memory there
                                         */
                                        WARN_ON_ONCE(1);
                                } else {
                                        unsigned int nr_pages =
                                                1 << get_order(page_size);
                                        while (nr_pages--)
                                                free_reserved_page(page++);
                                }
                        } else
                                free_pages((unsigned long)(__va(addr)),
                                                        get_order(page_size));

                        vmemmap_remove_mapping(start, page_size);
                }
        }
}
#endif
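/*
 * Nothing to do on powerpc; this empty hook presumably exists because the
 * generic sparsemem/memory-hotplug code expects every architecture that
 * provides vmemmap to implement it.
 */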
void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */