linux/arch/score/mm/init.c
/*
 * arch/score/mm/init.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/initrd.h>

#include <asm/sections.h>
#include <asm/tlb.h>

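/*
 * Per-CPU mmu_gather state consumed by the generic TLB teardown
 * helpers pulled in through <asm/tlb.h>.
 */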
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

unsigned long empty_zero_page;
EXPORT_SYMBOL_GPL(empty_zero_page);

static struct kcore_list kcore_mem, kcore_vmalloc;

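/*
 * Allocate and reserve the shared zero page; the return value is the
 * number of pages consumed (one), which mem_init() subtracts from
 * totalram_pages.
 */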
static unsigned long setup_zero_page(void)
{
        struct page *page;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *) empty_zero_page);
        SetPageReserved(page);

        return 1UL;
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
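/*
 * A pfn counts as RAM when it lies inside the low-memory window
 * [min_low_pfn, max_low_pfn).
 */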
static int __init page_is_ram(unsigned long pagenr)
{
        if (pagenr >= min_low_pfn && pagenr < max_low_pfn)
                return 1;
        else
                return 0;
}

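/*
 * Build the kernel page tables and describe the zone layout to the
 * core VM: everything up to max_low_pfn lands in ZONE_NORMAL.
 */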
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long lastpfn;

        pagetable_init();
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        lastpfn = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);
}

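/*
 * Hand boot memory over to the buddy allocator, count reserved pages,
 * register the /proc/kcore regions and print the memory banner.
 */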
void __init mem_init(void)
{
        unsigned long codesize, reservedpages, datasize, initsize;
        unsigned long tmp, ram = 0;

        max_mapnr = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
        totalram_pages += free_all_bootmem();
        totalram_pages -= setup_zero_page();    /* Setup zeroed pages. */
        reservedpages = 0;

        for (tmp = 0; tmp < max_low_pfn; tmp++)
                if (page_is_ram(tmp)) {
                        ram++;
                        if (PageReserved(pfn_to_page(tmp)))
                                reservedpages++;
                }

        num_physpages = ram;
        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *) VMALLOC_START,
                        VMALLOC_END - VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
                        "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
                        (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                        ram << (PAGE_SHIFT-10), codesize >> 10,
                        reservedpages << (PAGE_SHIFT-10), datasize >> 10,
                        initsize >> 10,
                        (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

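/*
 * Give the physical range [begin, end) back to the buddy allocator:
 * clear the reserved bit, poison the old contents and free each page.
 */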
static void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                struct page *page = pfn_to_page(pfn);
                void *addr = phys_to_virt(PFN_PHYS(pfn));

                ClearPageReserved(page);
                init_page_count(page);
                memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                __free_page(page);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
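/* The initrd bounds arrive as virtual addresses; convert to physical first. */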
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory",
                virt_to_phys((void *) start),
                virt_to_phys((void *) end));
}
#endif

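/* Drop the __init text and data now that booting has finished. */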
void __init_refok free_initmem(void)
{
        free_init_pages("unused kernel memory",
        __pa(&__init_begin),
        __pa(&__init_end));
}

unsigned long pgd_current;

#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offset.h until that gcc
 * will officially be retired.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PTE_ORDER);
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
