/*
 *  linux/arch/powerpc/mm/init_64.c
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
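
/*
 * The two #warning sanity checks above are purely informational: the
 * first fires when the page tables span more virtual space than user
 * VSIDs can actually map (so part of the page-table range can never be
 * used), and the second fires when TASK_SIZE_USER64 sits below both
 * limits, i.e. the user address space could be made larger for free.
 */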

/* max amount of RAM to use */
unsigned long __max_memory;

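/*
 * Reclaim the memory occupied by the kernel's __init sections: poison
 * each page with POISON_FREE_INITMEM to catch stale references, then
 * return the pages to the page allocator and credit totalram_pages.
 */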
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
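/*
 * Free the pages that held the initrd image once the ramdisk contents
 * are no longer needed; as in free_initmem(), every page goes back to
 * the page allocator.
 */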
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %luk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

#ifdef CONFIG_PROC_KCORE
static struct kcore_list kcore_vmem;

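/*
 * Register each lmb memory region, plus the vmalloc area, with
 * /proc/kcore so that kernel memory can be inspected through the
 * kcore pseudo-ELF interface (e.g. by gdb).
 */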
static int __init setup_kcore(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base, size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __func__);

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END - VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
#endif

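/*
 * Constructor for the page-table caches created below: a freshly
 * allocated table must read back as all zeroes, i.e. "no translation
 * present", before it is hooked into the page-table tree.
 */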
static void zero_ctor(struct kmem_cache *cache, void *addr)
{
	memset(addr, 0, kmem_cache_size(cache));
}

static const unsigned int pgtable_cache_size[2] = {
	PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
	"pgd_cache", "pmd_cache",
#else
	"pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */
};

#ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
 * can't put it into the tables above, because HPAGE_SHIFT is not a
 * compile-time constant. */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif

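/*
 * These caches back the page-table allocation helpers.  As a rough
 * sketch of the pattern the asm/pgalloc.h wrappers follow (simplified
 * here, not a verbatim copy of that header):
 *
 *	pgd_t *pgd = kmem_cache_alloc(pgtable_cache[0], GFP_KERNEL);
 *
 * zero_ctor() above guarantees that any table handed out this way is
 * already cleared.
 */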
void pgtable_cache_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
		int size = pgtable_cache_size[i];
		const char *name = pgtable_cache_name[i];

		pr_debug("Allocating page table cache %s (#%d) "
			 "for size: %08x...\n", name, i, size);
		pgtable_cache[i] = kmem_cache_create(name,
						     size, size,
						     SLAB_PANIC,
						     zero_ctor);
	}
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
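
/*
 * Worked example for vmemmap_section_start(), with illustrative
 * numbers rather than values from any particular config: assume
 * sizeof(struct page) == 64 and PAGES_PER_SECTION == 4096.  For
 * page == (unsigned long)vmemmap + 0x42000, offset is 0x42000, which
 * is page index 0x42000 / 64 == 0x1080; masking with
 * PAGE_SECTION_MASK (~(PAGES_PER_SECTION - 1) == ~0xfff) gives 0x1000,
 * the first pfn of the section that struct page belongs to.
 */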

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}

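/*
 * Build the vmemmap covering nr_pages struct pages starting at
 * start_page: walk the range in linear-mapping-sized chunks, allocate
 * a backing block for each chunk that no initialised section already
 * covers, and bolt it into the hash page table with kernel read/write
 * permissions (mode_rw below).
 */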
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long mode_rw;
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	for (; start < end; start += page_size) {
		int mapped;
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
			 start, p, __pa(p));

		mapped = htab_bolt_mapping(start, start + page_size,
					   __pa(p), mode_rw, mmu_linear_psize,
					   mmu_kernel_ssize);
		BUG_ON(mapped < 0);
	}

	return 0;
}
#endif