linux/arch/sh/mm/init.c
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * Handle trivial transitions between cached and uncached
 * segments, making use of the 1:1 mapping relationship in
 * 512MB lowmem.
 *
 * This is the offset of the uncached section from its cached alias.
 * The default value is only valid in 29-bit mode; in 32-bit mode it
 * is overridden in pmb_init().
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
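
/*
 * Illustrative sketch (hypothetical helper, not part of this file's
 * interface): given the 1:1 P1/P2 relationship described above, the
 * uncached alias of a cached lowmem pointer is reached by simple
 * offset arithmetic with cached_to_uncached.
 */
static inline void *sh_uncached_alias(void *cached)
{
        return (void *)((unsigned long)cached + cached_to_uncached);
}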
#endif

#ifdef CONFIG_MMU
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
                pgd_ERROR(*pgd);
                return;
        }

        pud = pud_alloc(NULL, pgd, addr);
        if (unlikely(!pud)) {
                /* pud is NULL here, so pud_ERROR(*pud) would oops */
                printk(KERN_ERR "%s: pud_alloc() failed for 0x%08lx\n",
                       __func__, addr);
                return;
        }

        pmd = pmd_alloc(NULL, pud, addr);
        if (unlikely(!pmd)) {
                /* likewise, pmd is NULL on this error path */
                printk(KERN_ERR "%s: pmd_alloc() failed for 0x%08lx\n",
                       __func__, addr);
                return;
        }

        pte = pte_offset_kernel(pmd, addr);
        if (!pte_none(*pte)) {
                pte_ERROR(*pte);
                return;
        }

        set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
        local_flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap
 * mapping across a context switch. We don't presently do this, but it
 * could be done in a fashion similar to the wired TLB interface that
 * sh64 uses (by way of the memory-mapped UTLB configuration); this
 * unfortunately forces us to give up a TLB entry for each mapping we
 * want to preserve. While this may be viable for a small number of
 * fixmaps, it's not particularly useful for everything and needs to
 * be carefully evaluated (ie, we may want this for the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we
 * can pass in at __set_fixmap() time to determine the appropriate
 * behavior to follow.
 *
 *                                       -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        set_pte_phys(address, phys, prot);
}

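/*
 * Usage sketch (illustrative only, never called): establish an
 * uncached mapping at a fixed virtual slot and recover its address.
 * FIX_UNCACHED is borrowed purely for illustration; the real call is
 * made from paging_init() below via set_fixmap_nocache(), which is
 * assumed to expand to __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE).
 */
static inline void fixmap_usage_sketch(unsigned long phys)
{
        __set_fixmap(FIX_UNCACHED, phys, PAGE_KERNEL_NOCACHE);

        /* The mapping is now live at the slot's fixed virtual address. */
        (void)fix_to_virt(FIX_UNCACHED);
}
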
void __init page_table_range_init(unsigned long start, unsigned long end,
                                         pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                /* The pud and pmd levels are folded into the pgd here,
                 * so the casts below step through the same table. */
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                        pmd_populate_kernel(&init_mm, pmd, pte);
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
}
#endif  /* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long vaddr, end;
        int nid;

        /* We don't need to map the kernel through the TLB, as
         * it is permanently mapped using P1. So clear the
         * entire pgd. */
        memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

        /* Set an initial value for the MMU.TTB so we don't have to
         * check for a null value. */
        set_TTB(swapper_pg_dir);

        /*
         * Populate the relevant portions of swapper_pg_dir so that
         * we can use the fixmap entries without calling kmalloc.
         * PTEs will be filled in by __set_fixmap().
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, swapper_pg_dir);

        kmap_coherent_init();

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long low, start_pfn;

                start_pfn = pgdat->bdata->node_min_pfn;
                low = pgdat->bdata->node_low_pfn;

                if (max_zone_pfns[ZONE_NORMAL] < low)
                        max_zone_pfns[ZONE_NORMAL] = low;

                printk(KERN_INFO "Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
                       nid, start_pfn, low);
        }

        free_area_init_nodes(max_zone_pfns);

        /* Set up the uncached fixmap */
        set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
}

void __init mem_init(void)
{
        int codesize, datasize, initsize;
        int nid;

        num_physpages = 0;
        high_memory = NULL;

        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long node_pages = 0;
                void *node_high_memory;

                num_physpages += pgdat->node_present_pages;

                if (pgdat->node_spanned_pages)
                        node_pages = free_all_bootmem_node(pgdat);

                totalram_pages += node_pages;

                node_high_memory = (void *)__va((pgdat->node_start_pfn +
                                                 pgdat->node_spanned_pages) <<
                                                 PAGE_SHIFT);
                if (node_high_memory > high_memory)
                        high_memory = node_high_memory;
        }

        /* Set this up early, so we can take care of the zero page */
        cpu_cache_init();

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);
        __flush_wback_region(empty_zero_page, PAGE_SIZE);

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
               "%dk data, %dk init)\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                datasize >> 10,
                initsize >> 10);

        /* Initialize the vDSO */
        vsyscall_init();
}

void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing unused kernel memory: %ldk freed\n",
               ((unsigned long)&__init_end -
                (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        unsigned long p;
        for (p = start; p < end; p += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(p));
                init_page_count(virt_to_page(p));
                free_page(p);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
               (end - start) >> 10);
}
#endif
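
/*
 * Both free_initmem() and free_initrd_mem() above repeat the same
 * unreserve-and-free loop. A hedged sketch of the shared idiom (the
 * helper below is hypothetical and not used anywhere in this file):
 */
static inline void free_reserved_range(unsigned long start, unsigned long end)
{
        unsigned long addr;

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
}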

#if THREAD_SHIFT < PAGE_SHIFT
/*
 * With thread stacks smaller than a page, back thread_info
 * allocations with a THREAD_SIZE-aligned slab cache rather than
 * handing out whole pages.
 */
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
        struct thread_info *ti;

        ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
        if (unlikely(ti == NULL))
                return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
        memset(ti, 0, THREAD_SIZE);
#endif
        return ti;
}

void free_thread_info(struct thread_info *ti)
{
        kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
        thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
                                              THREAD_SIZE, 0, NULL);
        BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        pgdat = NODE_DATA(nid);

        /* We only have ZONE_NORMAL, so this is easy.. */
        ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
                                start_pfn, nr_pages);
        if (unlikely(ret))
                printk(KERN_ERR "%s: Failed, __add_pages() == %d\n",
                       __func__, ret);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
        /* Node 0 for now.. */
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */