linux/arch/sh/mm/init.c
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>

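/*
 * The kernel's master page table. paging_init() clears this at boot
 * and pre-populates the fixmap range so that __set_fixmap() only ever
 * has to write a PTE.
 */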
pgd_t swapper_pg_dir[PTRS_PER_PGD];

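/*
 * Default machine vector memory setup: register the platform's fixed
 * memory window with memblock. Boards with a more involved layout
 * override this through their machine vector (sh_mv.mv_mem_init,
 * which paging_init() invokes below).
 */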
void __init generic_mem_init(void)
{
        memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
        /* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
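/*
 * Walk the kernel page tables for @addr, allocating any missing
 * intermediate levels along the way, and return a pointer to the PTE
 * slot. Only the PGD level is expected to be populated already;
 * pud/pmd tables come from pud_alloc()/pmd_alloc(). Returns NULL if
 * the walk cannot be completed.
 */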
static pte_t *__get_pte_phys(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
                pgd_ERROR(*pgd);
                return NULL;
        }

        pud = pud_alloc(NULL, pgd, addr);
        if (unlikely(!pud)) {
                /* Allocation failed; there is no pud to dump here. */
                return NULL;
        }

        pmd = pmd_alloc(NULL, pud, addr);
        if (unlikely(!pmd)) {
                /* Likewise, don't dereference a NULL pmd. */
                return NULL;
        }

        return pte_offset_kernel(pmd, addr);
}

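/*
 * Install (or tear down) a single kernel translation for @addr and
 * knock the stale entry out of the TLB. Translations established with
 * _PAGE_WIRED set are additionally pinned in the TLB through
 * tlb_wire_entry() so they can never be evicted; clear_pte_phys()
 * undoes the wiring before clearing the PTE.
 */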
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
        pte_t *pte;

        pte = __get_pte_phys(addr);
        if (!pte)
                return;
        if (!pte_none(*pte)) {
                pte_ERROR(*pte);
                return;
        }

        set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
        local_flush_tlb_one(get_asid(), addr);

        if (pgprot_val(prot) & _PAGE_WIRED)
                tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
        pte_t *pte;

        pte = __get_pte_phys(addr);
        if (!pte)
                return;

        if (pgprot_val(prot) & _PAGE_WIRED)
                tlb_unwire_entry();

        set_pte(pte, pfn_pte(0, __pgprot(0)));
        local_flush_tlb_one(get_asid(), addr);
}

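/*
 * Fixmap interface: every enum fixed_addresses index corresponds to a
 * compile-time-fixed virtual address (see __fix_to_virt()), so callers
 * can map a physical page at a known virtual slot without any
 * allocation, along the lines of (index name purely illustrative):
 *
 *      __set_fixmap(FIX_FOO, phys, PAGE_KERNEL);
 */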
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        clear_pte_phys(address, prot);
}

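/*
 * Early page table construction helpers: if the upper-level entry is
 * still empty, grab a page from the boot allocator for the next-level
 * table and hook it up. alloc_bootmem_pages() hands back zeroed
 * memory, which is exactly what a fresh, empty table needs.
 */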
static pmd_t * __init one_md_table_init(pud_t *pud)
{
        if (pud_none(*pud)) {
                pmd_t *pmd;

                pmd = alloc_bootmem_pages(PAGE_SIZE);
                pud_populate(&init_mm, pud, pmd);
                BUG_ON(pmd != pmd_offset(pud, 0));
        }

        return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *pte;

                pte = alloc_bootmem_pages(PAGE_SIZE);
                pmd_populate_kernel(&init_mm, pmd, pte);
                BUG_ON(pte != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                            unsigned long vaddr, pte_t *lastpte)
{
        return pte;
}

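/*
 * Pre-populate the pgd/pud/pmd levels covering [start, end) so that
 * later __set_fixmap() calls only ever have to write a PTE and never
 * allocate. page_table_kmap_check() above is a no-op stub, apparently
 * retained for symmetry with the i386 code this file is based on,
 * where it fixes up kmap PTE placement.
 */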
void __init page_table_range_init(unsigned long start, unsigned long end,
                                         pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
                        pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
                        pmd += k;
#endif
                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
                                pte = page_table_kmap_check(one_page_table_init(pmd),
                                                            pmd, vaddr, pte);
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
}
#endif  /* CONFIG_MMU */

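/*
 * Set up the pglist_data for @nid. In the multi-node case the
 * structure itself is carved out of memblock, preferably below the
 * node's own end of memory so that it ends up node-local; the flat
 * case simply reuses the statically allocated NODE_DATA(0).
 */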
void __init allocate_pgdat(unsigned int nid)
{
        unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
        unsigned long phys;
#endif

        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        phys = __memblock_alloc_base(sizeof(struct pglist_data),
                                SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
        /* Retry with all of system memory */
        if (!phys)
                phys = __memblock_alloc_base(sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, memblock_end_of_DRAM());
        if (!phys)
                panic("Can't allocate pgdat for node %d\n", nid);

        NODE_DATA(nid) = __va(phys);
        memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

        NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

        NODE_DATA(nid)->node_start_pfn = start_pfn;
        NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

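/*
 * Bring up the classical bootmem allocator on one node: allocate its
 * bitmap from memblock, release the node's active regions into it,
 * and then re-reserve everything memblock already considers reserved
 * so the two allocators stay in agreement.
 */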
static void __init bootmem_init_one_node(unsigned int nid)
{
        unsigned long total_pages, paddr;
        unsigned long end_pfn;
        struct pglist_data *p;

        p = NODE_DATA(nid);

        /* Nothing to do.. */
        if (!p->node_spanned_pages)
                return;

        end_pfn = pgdat_end_pfn(p);

        total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

        paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
        if (!paddr)
                panic("Can't allocate bootmap for nid[%d]\n", nid);

        init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

        free_bootmem_with_active_regions(nid, end_pfn);

        /*
         * XXX Handle initial reservations for the system memory node
         * only for the moment, we'll refactor this later for handling
         * reservations in other nodes.
         */
        if (nid == 0) {
                struct memblock_region *reg;

                /* Reserve the sections we're already using. */
                for_each_memblock(reserved, reg) {
                        reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
                }
        }

        sparse_memory_present_with_active_regions(nid);
}

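/*
 * Hand the memblock view of memory over to bootmem: register every
 * memory region as an active range, bring up node 0 (which holds all
 * of system RAM in the non-NUMA case), give the platform a final say
 * via plat_mem_setup(), and then initialize bootmem on each online
 * node.
 */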
static void __init do_init_bootmem(void)
{
        struct memblock_region *reg;
        int i;

        /* Add active regions with valid PFNs. */
        for_each_memblock(memory, reg) {
                unsigned long start_pfn, end_pfn;
                start_pfn = memblock_region_memory_base_pfn(reg);
                end_pfn = memblock_region_memory_end_pfn(reg);
                __add_active_range(0, start_pfn, end_pfn);
        }

        /* All of system RAM sits in node 0 for the non-NUMA case */
        allocate_pgdat(0);
        node_set_online(0);

        plat_mem_setup();

        for_each_online_node(i)
                bootmem_init_one_node(i);

        sparse_init();
}

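/*
 * Early fixed reservations: the kernel image itself (from the zero
 * page offset up to _end, rounded up to a whole page), whatever the
 * platform keeps below CONFIG_ZERO_PAGE_OFFSET, and the initrd and
 * crashkernel regions if any were requested.
 */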
static void __init early_reserve_mem(void)
{
        unsigned long start_pfn;
        u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
        u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

        /*
         * Partially used pages are not usable - thus
         * we are rounding upwards:
         */
        start_pfn = PFN_UP(__pa(_end));

        /*
         * Reserve the kernel text and the bootmem bitmap. We do this
         * in two steps (first step was init_bootmem()), because this
         * catches the (definitely buggy) case of us accidentally
         * initializing the bootmem allocator with an invalid RAM area.
         */
        memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

        /*
         * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
         */
        if (CONFIG_ZERO_PAGE_OFFSET != 0)
                memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

        /*
         * Handle additional early reservations.
         */
        check_for_initrd();
        reserve_crashkernel();
}

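/*
 * Boot-time MM bring-up. The ordering below matters: the machine
 * vector registers memory first, early reservations are taken out of
 * memblock, the PFN limits are derived, bootmem is brought up per
 * node, and only then are the kernel page tables and the fixmap range
 * initialized, before the zone sizes are finally handed to the core VM.
 */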
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long vaddr, end;
        int nid;

        sh_mv.mv_mem_init();

        early_reserve_mem();

        /*
         * Once the early reservations are out of the way, give the
         * platforms a chance to kick out some memory.
         */
        if (sh_mv.mv_mem_reserve)
                sh_mv.mv_mem_reserve();

        memblock_enforce_memory_limit(memory_limit);
        memblock_allow_resize();

        memblock_dump_all();

        /*
         * Determine low and high memory ranges:
         */
        max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

        nodes_clear(node_online_map);

        memory_start = (unsigned long)__va(__MEMORY_START);
        memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

        uncached_init();
        pmb_init();
        do_init_bootmem();
        ioremap_fixed_init();

        /*
         * We don't need to map the kernel through the TLB, as it is
         * permanently mapped through the P1 segment. So clear the
         * entire pgd.
         */
        memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

        /*
         * Set an initial value for the MMU.TTB so we don't have to
         * check for a null value.
         */
        set_TTB(swapper_pg_dir);

        /*
         * Populate the relevant portions of swapper_pg_dir so that
         * we can use the fixmap entries without calling kmalloc.
         * PTEs will be filled in by __set_fixmap().
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, swapper_pg_dir);

        kmap_coherent_init();

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long low, start_pfn;

                start_pfn = pgdat->bdata->node_min_pfn;
                low = pgdat->bdata->node_low_pfn;

                if (max_zone_pfns[ZONE_NORMAL] < low)
                        max_zone_pfns[ZONE_NORMAL] = low;

                pr_info("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
                        nid, start_pfn, low);
        }

        free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
        no_iommu_init();
}

unsigned int mem_init_done = 0;

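/*
 * Late memory initialization: derive high_memory across all online
 * nodes, release the remaining bootmem pages into the buddy
 * allocator, bring up the CPU caches (which must happen before the
 * zero page is touched), and dump the virtual memory layout.
 */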
void __init mem_init(void)
{
        pg_data_t *pgdat;

        iommu_init();

        high_memory = NULL;
        for_each_online_pgdat(pgdat)
                high_memory = max_t(void *, high_memory,
                                    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

        free_all_bootmem();

        /* Set this up early, so we can take care of the zero page */
        cpu_cache_init();

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);
        __flush_wback_region(empty_zero_page, PAGE_SIZE);

        vsyscall_init();

        mem_init_print_info(NULL);
        pr_info("virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
                "            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                (unsigned long)VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)memory_start, (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
                uncached_start, uncached_end, uncached_size >> 20,
#endif

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        mem_init_done = 1;
}

void free_initmem(void)
{
        free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
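/*
 * Memory hotplug support. SH only has ZONE_NORMAL, so hot-added
 * memory is simply onlined there, and for the time being any
 * hot-added physical address is attributed to node 0.
 */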
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        /* We only have ZONE_NORMAL, so this is easy.. */
        ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
        if (unlikely(ret))
                pr_warn("%s: Failed, __add_pages() == %d\n", __func__, ret);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
        /* Node 0 for now.. */
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;
        int ret;

        zone = page_zone(pfn_to_page(start_pfn));
        ret = __remove_pages(zone, start_pfn, nr_pages);
        if (unlikely(ret))
                pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
                        ret);

        return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */