linux/arch/powerpc/mm/init_64.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

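/*
 * memstart_addr is the physical address at which system RAM begins and
 * kernstart_addr the physical address the kernel was loaded at; both are
 * filled in early in boot by the platform/MMU setup code.
 */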
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

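/*
 * Constructors for the page-table kmem_caches below: each simply zeroes a
 * freshly allocated table so every new PGD/PUD/PMD page starts out with no
 * valid entries.
 */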
static void pgd_ctor(void *addr)
{
        memset(addr, 0, PGD_TABLE_SIZE);
}

static void pud_ctor(void *addr)
{
        memset(addr, 0, PUD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
        memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
        char *name;
        unsigned long table_size = sizeof(void *) << shift;
        unsigned long align = table_size;

        /* When batching pgtable pointers for RCU freeing, we store
         * the index size in the low bits.  Table alignment must be
         * big enough to fit it.
         *
         * Likewise, hugepage pagetable pointers contain a (different)
         * shift value in the low bits.  All tables must be aligned so
         * as to leave enough 0 bits in the address to contain it. */
        unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
                                     HUGEPD_SHIFT_MASK + 1);
        struct kmem_cache *new;

        /* It would be nice if this was a BUILD_BUG_ON(), but at the
         * moment, gcc doesn't seem to recognize is_power_of_2 as a
         * constant expression, so, so much for that. */
        BUG_ON(!is_power_of_2(minalign));
        BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

        if (PGT_CACHE(shift))
                return; /* Already have a cache of this size */

        align = max_t(unsigned long, align, minalign);
        name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
        new = kmem_cache_create(name, table_size, align, 0, ctor);
        kfree(name);
        pgtable_cache[shift - 1] = new;
        pr_debug("Allocated pgtable cache for order %d\n", shift);
}

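/*
 * A rough sketch of how these caches are consumed (illustrative only, not a
 * quote of the actual allocators): higher-level allocators such as
 * pgd_alloc() look a cache up by its index size and carve tables out of it,
 * along the lines of
 *
 *      pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
 *      ...
 *      kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 *
 * PGT_CACHE(shift) simply indexes pgtable_cache[shift - 1], matching the
 * assignment in pgtable_cache_add() above.
 */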
void pgtable_cache_init(void)
{
        pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
        pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
        /*
         * In all current configs, when the PUD index exists it's the
         * same size as either the pgd or pmd index except with THP enabled
         * on book3s 64
         */
        if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
                pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);

        if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
                panic("Couldn't allocate pgtable caches");
        if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
                panic("Couldn't allocate pud pgtable caches");
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

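/*
 * For example, for any address inside &vmemmap[pfn], the division in
 * vmemmap_section_start() recovers pfn, and masking with PAGE_SECTION_MASK
 * (~(PAGES_PER_SECTION - 1)) rounds it down to the first pfn of that
 * sparsemem section.
 */
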
/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;
        start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(page_to_pfn((struct page *)start)))
                        return 1;

        return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different
 */

#ifdef CONFIG_PPC_BOOK3E
static int __meminit vmemmap_create_mapping(unsigned long start,
                                            unsigned long page_size,
                                            unsigned long phys)
{
        /* Create a PTE encoding without page size */
        unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
                _PAGE_KERNEL_RW;

        /* PTEs only contain page size encodings up to 32M */
        BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

        /* Encode the size in the PTE */
        flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

        /* For each PTE for that area, map things. Note that we don't
         * increment phys because all PTEs are of the large size and
         * thus must have the low bits clear
         */
        for (i = 0; i < page_size; i += PAGE_SIZE)
                BUG_ON(map_kernel_page(start + i, phys, flags));

        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
                                   unsigned long page_size)
{
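        /* Nothing to do yet: the Book3E vmemmap mapping is simply left in
         * place when the backing memory is removed. */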
}
#endif
#else /* CONFIG_PPC_BOOK3E */
static int __meminit vmemmap_create_mapping(unsigned long start,
                                            unsigned long page_size,
                                            unsigned long phys)
{
        int rc = htab_bolt_mapping(start, start + page_size, phys,
                                   pgprot_val(PAGE_KERNEL),
                                   mmu_vmemmap_psize, mmu_kernel_ssize);
        if (rc < 0) {
                int rc2 = htab_remove_mapping(start, start + page_size,
                                              mmu_vmemmap_psize,
                                              mmu_kernel_ssize);
                BUG_ON(rc2 && (rc2 != -ENOENT));
        }
        return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
                                   unsigned long page_size)
{
        int rc = htab_remove_mapping(start, start + page_size,
                                     mmu_vmemmap_psize,
                                     mmu_kernel_ssize);
        BUG_ON((rc < 0) && (rc != -ENOENT));
        WARN_ON(rc == -ENOENT);
}
#endif

#endif /* CONFIG_PPC_BOOK3E */

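/*
 * vmemmap_list records, for every vmemmap backing block we allocate, the
 * physical address and the virtual address it backs.  The list is kept for
 * the sake of crashdump (see the comment above realmode_pfn_to_page()
 * below), which also walks it to translate vmemmap addresses in real mode.
 * next/num_left implement a simple bump allocator that hands out list
 * entries a page at a time, and entries released by vmemmap_list_free()
 * are chained back onto 'next' (counted by num_freed) for reuse.
 */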
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
        struct vmemmap_backing *vmem_back;
        /* get from freed entries first */
        if (num_freed) {
                num_freed--;
                vmem_back = next;
                next = next->list;

                return vmem_back;
        }

        /* allocate a page when required and hand out chunks */
        if (!num_left) {
                next = vmemmap_alloc_block(PAGE_SIZE, node);
                if (unlikely(!next)) {
                        WARN_ON(1);
                        return NULL;
                }
                num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
        }

        num_left--;

        return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
                                            unsigned long start,
                                            int node)
{
        struct vmemmap_backing *vmem_back;

        vmem_back = vmemmap_list_alloc(node);
        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return;
        }

        vmem_back->phys = phys;
        vmem_back->virt_addr = start;
        vmem_back->list = vmemmap_list;

        vmemmap_list = vmem_back;
}

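/*
 * Allocate and map the virtual memory map for [start, end): back each
 * vmemmap-sized chunk with a block of real memory, remember the pairing on
 * vmemmap_list, and create the kernel mapping for it.
 */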
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align to the page size of the linear mapping. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

        for (; start < end; start += page_size) {
                void *p;
                int rc;

                if (vmemmap_populated(start, page_size))
                        continue;

                p = vmemmap_alloc_block(page_size, node);
                if (!p)
                        return -ENOMEM;

                vmemmap_list_populate(__pa(p), start, node);

                pr_debug("      * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                rc = vmemmap_create_mapping(start, page_size, __pa(p));
                if (rc < 0) {
                        pr_warning(
                                "vmemmap_populate: Unable to create vmemmap mapping: %d\n",
                                rc);
                        return -EFAULT;
                }
        }

        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
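/*
 * Drop the vmemmap_list entry for this virtual address and hand back the
 * physical address of its backing block (0 if no entry is found) so the
 * caller can free that memory.
 */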
static unsigned long vmemmap_list_free(unsigned long start)
{
        struct vmemmap_backing *vmem_back, *vmem_back_prev;

        vmem_back_prev = vmem_back = vmemmap_list;

        /* look for it with prev pointer recorded */
        for (; vmem_back; vmem_back = vmem_back->list) {
                if (vmem_back->virt_addr == start)
                        break;
                vmem_back_prev = vmem_back;
        }

        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return 0;
        }

        /* remove it from vmemmap_list */
        if (vmem_back == vmemmap_list) /* remove head */
                vmemmap_list = vmem_back->list;
        else
                vmem_back_prev->list = vmem_back->list;
        /* make next point to this freed entry */
        vmem_back->list = next;
        next = vmem_back;
        num_freed++;

        return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_free %lx...%lx\n", start, end);

        for (; start < end; start += page_size) {
                unsigned long addr;

                /*
                 * The section has already been marked as invalid, so if
                 * vmemmap_populated() returns true some other sections
                 * still use this page; skip it.
                 */
                if (vmemmap_populated(start, page_size))
                        continue;

                addr = vmemmap_list_free(start);
                if (addr) {
                        struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

                        if (PageReserved(page)) {
                                /* allocated from bootmem */
                                if (page_size < PAGE_SIZE) {
                                        /*
                                         * this shouldn't happen, but if it is
                                         * the case, leave the memory there
                                         */
                                        WARN_ON_ONCE(1);
                                } else {
                                        unsigned int nr_pages =
                                                1 << get_order(page_size);
                                        while (nr_pages--)
                                                free_reserved_page(page++);
                                }
                        } else
                                free_pages((unsigned long)(__va(addr)),
                                                        get_order(page_size));

                        vmemmap_remove_mapping(start, page_size);
                }
        }
}
#endif
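
/*
 * Stub for the generic sparsemem/memory-hotplug interface; there is nothing
 * to register on powerpc, so this is a no-op.
 */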
void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
{
}

/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump. In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in real mode),
 * the requested page struct can be split between blocks, so get_page/put_page
 * may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
        struct vmemmap_backing *vmem_back;
        struct page *page;
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
        unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

        for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
                if (pg_va < vmem_back->virt_addr)
                        continue;

                /* Entries may have been freed from vmemmap_list, so check every one */
                if ((pg_va + sizeof(struct page)) <=
                                (vmem_back->virt_addr + page_size)) {
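                        /* Translate via the recorded physical address so the
                         * struct page can be accessed in real mode. */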
                        page = (struct page *) (vmem_back->phys + pg_va -
                                vmem_back->virt_addr);
                        return page;
                }
        }

        /* The requested page struct is probably split across two backing blocks */
        return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
        struct page *page = pfn_to_page(pfn);
        return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
