/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD];

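/*
 * Default memory-init hook: sh_mv.mv_mem_init falls back to this when a
 * board does not supply its own, registering the Kconfig-supplied memory
 * window (__MEMORY_START/__MEMORY_SIZE, i.e. CONFIG_MEMORY_START and
 * CONFIG_MEMORY_SIZE) with memblock.
 */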
void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}
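
/*
 * Boards override the weak stub above when they need to.  A minimal
 * sketch of such an override (hypothetical board file; the base address
 * and size are made up for illustration) might be:
 *
 *	void __init plat_mem_setup(void)
 *	{
 *		memblock_reserve(0x0c000000, SZ_1M);
 *	}
 */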

#ifdef CONFIG_MMU
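/*
 * Walk the kernel page tables down to the pte for 'addr', allocating
 * the pud/pmd levels on demand.  The pgd slot must already be present;
 * a missing entry there is reported and treated as a failure.
 */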
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}
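
/*
 * Illustrative use of the pair above (the indices come from
 * asm/fixmap.h): the fixed-slot ioremap code maps a physical page at a
 * compile-time-known virtual address roughly like
 *
 *	__set_fixmap(FIX_IOREMAP_BEGIN + slot, phys & PAGE_MASK, prot);
 *
 * and later tears the slot down again with __clear_fixmap().
 */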
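/*
 * Early page-table allocators: alloc_bootmem_pages() returns zeroed,
 * page-aligned memory from the boot allocator, so freshly hooked-up
 * pmd/pte tables start out with every entry empty.
 */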
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = alloc_bootmem_pages(PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = alloc_bootmem_pages(PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

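/*
 * No kmap fixup is required here; this hook exists to keep the loop in
 * page_table_range_init() in step with the x86 code it was modelled on.
 */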
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

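/*
 * Pre-allocate all intermediate page tables in pgd_base for the range
 * [start, end) so that fixmap users only have to fill in ptes.  The
 * i/j/k indices track the pgd/pud/pmd slots, ensuring the walk starts
 * at the right offset within each table when 'start' is not
 * table-aligned.
 */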
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

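/*
 * Set up NODE_DATA() for a node.  With CONFIG_NEED_MULTIPLE_NODES the
 * pglist_data itself is carved out of memblock, preferably below the
 * node's own end pfn, falling back to anywhere in DRAM if that fails.
 */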
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					SMP_CACHE_BYTES, memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

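/*
 * Register every memblock region as an active pfn range, bring node 0
 * online with its pgdat, run the board's plat_mem_setup() hook, and
 * hand the final layout to sparsemem.
 */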
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			memblock_region_memory_end_pfn(reg));
	}
	sparse_init();
}

static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

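/*
 * Boot-time MMU/memory bring-up.  Ordering matters here: the machvec
 * memory hooks and early reservations run before memblock may resize
 * itself, the pgdats are set up before sparse_init(), and the fixmap
 * page tables are populated before anyone can call __set_fixmap().
 */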
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

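/*
 * Set once mem_init() has finished.  Code that may run both before and
 * after the core allocators come up tests this to pick a path; for
 * instance, sh's __ioremap_caller() falls back to fixed-slot mappings
 * while it is still zero.
 */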
unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	free_all_bootmem();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

void free_initmem(void)
{
	free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
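/*
 * Hotplugged memory can only land in ZONE_NORMAL, the sole zone on sh,
 * so arch_add_memory() simply defers to the generic __add_pages() and
 * reports any failure.
 */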
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
	if (unlikely(ret))
		pr_err("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
	if (unlikely(ret))
		pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
			ret);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */