/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>

#include <asm/mach-types.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

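/*
 * Parse the "initrd=<start>,<size>" command line parameter, recording
 * the physical start address and size of the initial ramdisk.
 */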
static int __init early_initrd(char *p)
{
        unsigned long start, size;
        char *endp;

        start = memparse(p, &endp);
        if (*endp == ',') {
                size = memparse(endp + 1, NULL);

                phys_initrd_start = start;
                phys_initrd_size = size;
        }
        return 0;
}
early_param("initrd", early_initrd);

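/*
 * The legacy ATAG_INITRD tag passes a virtual address, which is
 * converted back to a physical address here.
 */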
static int __init parse_tag_initrd(const struct tag *tag)
{
        printk(KERN_WARNING "ATAG_INITRD is deprecated; "
                "please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

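/* ATAG_INITRD2 passes the physical address directly. */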
static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
{
        phys_initrd_start = start;
        phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */

/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as by show_mem() for skipping
 * over holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(unsigned int filter)
{
        int free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0, slab = 0, i;
        struct meminfo *mi = &meminfo;

        printk("Mem-info:\n");
        show_free_areas(filter);

        for_each_bank (i, mi) {
                struct membank *bank = &mi->bank[i];
                unsigned int pfn1, pfn2;
                struct page *page, *end;

                pfn1 = bank_pfn_start(bank);
                pfn2 = bank_pfn_end(bank);

                page = pfn_to_page(pfn1);
                end  = pfn_to_page(pfn2 - 1) + 1;

                do {
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (PageSlab(page))
                                slab++;
                        else if (!page_count(page))
                                free++;
                        else
                                shared += page_count(page) - 1;
                        page++;
                } while (page < end);
        }

        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d slab pages\n", slab);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

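/*
 * Find the lowest PFN, the highest lowmem PFN and the highest overall
 * PFN covered by the registered memory banks.
 */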
static void __init find_limits(unsigned long *min, unsigned long *max_low,
        unsigned long *max_high)
{
        struct meminfo *mi = &meminfo;
        int i;

        *min = -1UL;
        *max_low = *max_high = 0;

        for_each_bank (i, mi) {
                struct membank *bank = &mi->bank[i];
                unsigned long start, end;

                start = bank_pfn_start(bank);
                end = bank_pfn_end(bank);

                if (*min > start)
                        *min = start;
                if (*max_high < end)
                        *max_high = end;
                if (bank->highmem)
                        continue;
                if (*max_low < end)
                        *max_low = end;
        }
}

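/*
 * Set up the bootmem allocator for the lowmem PFN range, then hand the
 * memblock memory and reserved regions over to it.
 */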
static void __init arm_bootmem_init(unsigned long start_pfn,
        unsigned long end_pfn)
{
        struct memblock_region *reg;
        unsigned int boot_pages;
        phys_addr_t bitmap;
        pg_data_t *pgdat;

        /*
         * Allocate the bootmem bitmap page.  This must be in a region
         * of memory which has already been mapped.
         */
        boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
                                __pfn_to_phys(end_pfn));

        /*
         * Initialise the bootmem allocator, handing the
         * memory banks over to bootmem.
         */
        node_set_online(0);
        pgdat = NODE_DATA(0);
        init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

        /* Free the lowmem regions from memblock into bootmem. */
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (end >= end_pfn)
                        end = end_pfn;
                if (start >= end)
                        break;

                free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
        }

        /* Reserve the lowmem memblock reserved regions in bootmem. */
        for_each_memblock(reserved, reg) {
                unsigned long start = memblock_region_reserved_base_pfn(reg);
                unsigned long end = memblock_region_reserved_end_pfn(reg);

                if (end >= end_pfn)
                        end = end_pfn;
                if (start >= end)
                        break;

                reserve_bootmem(__pfn_to_phys(start),
                                (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
        }
}

#ifdef CONFIG_ZONE_DMA
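/*
 * Carve the machine-specific DMA zone out of the bottom of lowmem:
 * the first dma_size pages become ZONE_DMA and the remainder stays
 * in ZONE_NORMAL.
 */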
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
{
        if (size[0] <= dma_size)
                return;

        size[ZONE_NORMAL] = size[0] - dma_size;
        size[ZONE_DMA] = dma_size;
        hole[ZONE_NORMAL] = hole[0];
        hole[ZONE_DMA] = 0;
}
#endif

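/*
 * Calculate the zone sizes and hole sizes from the PFN limits and the
 * memblock memory map, then initialise the free lists for node 0.
 */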
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
        unsigned long max_high)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        struct memblock_region *reg;

        /*
         * initialise the zones.
         */
        memset(zone_size, 0, sizeof(zone_size));

        /*
         * The memory size has already been determined.  If we need
         * to do anything fancy with the allocation of this memory
         * to the zones, now is the time to do it.
         */
        zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
        zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

        /*
         * Calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes)
         */
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start < max_low) {
                        unsigned long low_end = min(end, max_low);
                        zhole_size[0] -= low_end - start;
                }
#ifdef CONFIG_HIGHMEM
                if (end > max_low) {
                        unsigned long high_start = max(start, max_low);
                        zhole_size[ZONE_HIGHMEM] -= end - high_start;
                }
#endif
        }

#ifdef ARM_DMA_ZONE_SIZE
#ifndef CONFIG_ZONE_DMA
#error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations
#endif

        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        arm_adjust_dma_zone(zone_size, zhole_size,
                ARM_DMA_ZONE_SIZE >> PAGE_SHIFT);
#endif

        free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void arm_memory_present(void)
{
}
#else
static void arm_memory_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                memory_present(0, memblock_region_memory_base_pfn(reg),
                               memblock_region_memory_end_pfn(reg));
}
#endif

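/* Comparison helper for sort(): order memory banks by start PFN. */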
static int __init meminfo_cmp(const void *_a, const void *_b)
{
        const struct membank *a = _a, *b = _b;
        long cmp = bank_pfn_start(a) - bank_pfn_start(b);
        return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

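/*
 * Register all memory banks with memblock, then reserve the regions
 * (kernel image, initrd, page tables, device tree, platform-specific
 * areas) which must never reach the page allocator.
 */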
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
        int i;

        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

        memblock_init();
        for (i = 0; i < mi->nr_banks; i++)
                memblock_add(mi->bank[i].start, mi->bank[i].size);

        /* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
        memblock_reserve(__pa(_sdata), _end - _sdata);
#else
        memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        if (phys_initrd_size &&
            !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
        if (phys_initrd_size &&
            memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
        if (phys_initrd_size) {
                memblock_reserve(phys_initrd_start, phys_initrd_size);

                /* Now convert initrd to virtual addresses */
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        }
#endif

        arm_mm_memblock_reserve();
        arm_dt_memblock_reserve();

        /* reserve any platform specific memblock areas */
        if (mdesc->reserve)
                mdesc->reserve();

        memblock_analyze();
        memblock_dump_all();
}

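/*
 * Top-level boot-time memory setup: bring up the bootmem allocator,
 * tell sparsemem which memory is present, and initialise the zones.
 */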
void __init bootmem_init(void)
{
        unsigned long min, max_low, max_high;

        max_low = max_high = 0;

        find_limits(&min, &max_low, &max_high);

        arm_bootmem_init(min, max_low);

        /*
         * Sparsemem tries to allocate bootmem in memory_present(),
         * so must be done after the fixed reservations
         */
        arm_memory_present();

        /*
         * sparse_init() needs the bootmem allocator up and running.
         */
        sparse_init();

        /*
         * Now free the memory - free_area_init_node needs
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
        arm_bootmem_free(min, max_low, max_high);

        high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;

        /*
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         *
         * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
         * the system, not the maximum PFN.
         */
        max_low_pfn = max_low - PHYS_PFN_OFFSET;
        max_pfn = max_high - PHYS_PFN_OFFSET;
}

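/*
 * Hand the pages in [pfn, end) back to the page allocator, returning
 * the number of pages freed.
 */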
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
        unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

        for (; pfn < end; pfn++) {
                struct page *page = pfn_to_page(pfn);
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
                pages++;
        }

        if (size && s)
                printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

        return pages;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
        pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * This relies on each bank being in address order.
         * The banks are sorted previously in arm_memblock_init().
         */
        for_each_bank(i, mi) {
                struct membank *bank = &mi->bank[i];

                bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist
                 * due to SPARSEMEM sections which aren't present.
                 */
                bank_start = min(bank_start,
                                 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end < bank_start)
                        free_memmap(prev_bank_end, bank_start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
                free_memmap(prev_bank_end,
                            ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}

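/*
 * Return the highmem portion of each memory region to the page
 * allocator, skipping any memblock-reserved ranges.
 */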
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
        struct memblock_region *mem, *res;

        /* set highmem page free */
        for_each_memblock(memory, mem) {
                unsigned long start = memblock_region_memory_base_pfn(mem);
                unsigned long end = memblock_region_memory_end_pfn(mem);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                /* Find and exclude any reserved regions */
                for_each_memblock(reserved, res) {
                        unsigned long res_start, res_end;

                        res_start = memblock_region_reserved_base_pfn(res);
                        res_end = memblock_region_reserved_end_pfn(res);

                        if (res_end < start)
                                continue;
                        if (res_start < start)
                                res_start = start;
                        if (res_start > end)
                                res_start = end;
                        if (res_end > end)
                                res_end = end;
                        if (res_start != start)
                                totalhigh_pages += free_area(start, res_start,
                                                             NULL);
                        start = res_end;
                        if (start == end)
                                break;
                }

                /* And now free anything which remains */
                if (start < end)
                        totalhigh_pages += free_area(start, end, NULL);
        }
        totalram_pages += totalhigh_pages;
#endif
}

 548
 549/*
 550 * mem_init() marks the free areas in the mem_map and tells us how much
 551 * memory is free.  This is done after various parts of the system have
 552 * claimed their memory after the kernel image.
 553 */
 554void __init mem_init(void)
 555{
 556        unsigned long reserved_pages, free_pages;
 557        struct memblock_region *reg;
 558        int i;
 559#ifdef CONFIG_HAVE_TCM
 560        /* These pointers are filled in on TCM detection */
 561        extern u32 dtcm_end;
 562        extern u32 itcm_end;
 563#endif
 564
 565        max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
 566
 567        /* this will put all unused low memory onto the freelists */
 568        free_unused_memmap(&meminfo);
 569
 570        totalram_pages += free_all_bootmem();
 571
 572#ifdef CONFIG_SA1111
 573        /* now that our DMA memory is actually so designated, we can free it */
 574        totalram_pages += free_area(PHYS_PFN_OFFSET,
 575                                    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
 576#endif
 577
 578        free_highpages();
 579
 580        reserved_pages = free_pages = 0;
 581
 582        for_each_bank(i, &meminfo) {
 583                struct membank *bank = &meminfo.bank[i];
 584                unsigned int pfn1, pfn2;
 585                struct page *page, *end;
 586
 587                pfn1 = bank_pfn_start(bank);
 588                pfn2 = bank_pfn_end(bank);
 589
 590                page = pfn_to_page(pfn1);
 591                end  = pfn_to_page(pfn2 - 1) + 1;
 592
 593                do {
 594                        if (PageReserved(page))
 595                                reserved_pages++;
 596                        else if (!page_count(page))
 597                                free_pages++;
 598                        page++;
 599                } while (page < end);
 600        }
 601
 602        /*
 603         * Since our memory may not be contiguous, calculate the
 604         * real number of pages we have in this system
 605         */
 606        printk(KERN_INFO "Memory:");
 607        num_physpages = 0;
 608        for_each_memblock(memory, reg) {
 609                unsigned long pages = memblock_region_memory_end_pfn(reg) -
 610                        memblock_region_memory_base_pfn(reg);
 611                num_physpages += pages;
 612                printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
 613        }
 614        printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
 615
 616        printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
 617                nr_free_pages() << (PAGE_SHIFT-10),
 618                free_pages << (PAGE_SHIFT-10),
 619                reserved_pages << (PAGE_SHIFT-10),
 620                totalhigh_pages << (PAGE_SHIFT-10));
 621
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

        printk(KERN_NOTICE "Virtual kernel memory layout:\n"
                        "    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
                        "    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                        "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_MMU
                        "    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
                        "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
                        "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
                        "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

                        MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
                                (PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
                        MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
                        MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
                        MLK(FIXADDR_START, FIXADDR_TOP),
#ifdef CONFIG_MMU
                        MLM(CONSISTENT_BASE, CONSISTENT_END),
#endif
                        MLM(VMALLOC_START, VMALLOC_END),
                        MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
                        MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
                                (PAGE_SIZE)),
#endif
                        MLM(MODULES_VADDR, MODULES_END),

                        MLK_ROUNDUP(__init_begin, __init_end),
                        MLK_ROUNDUP(_text, _etext),
                        MLK_ROUNDUP(_sdata, _edata),
                        MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#ifdef CONFIG_MMU
        BUILD_BUG_ON(VMALLOC_END                        > CONSISTENT_BASE);
        BUG_ON(VMALLOC_END                              > CONSISTENT_BASE);

        BUILD_BUG_ON(TASK_SIZE                          > MODULES_VADDR);
        BUG_ON(TASK_SIZE                                > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
        BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE      > PAGE_OFFSET);
#endif

        if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get
                 * anywhere without overcommit, so turn
                 * it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}

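/*
 * Free the memory used by the __init sections (and any TCM link area)
 * once the kernel has finished booting.
 */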
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
        extern char __tcm_start, __tcm_end;

        totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
                                    __phys_to_pfn(__pa(&__tcm_end)),
                                    "TCM link");
#endif

        if (!machine_is_integrator() && !machine_is_cintegrator())
                totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
                                            __phys_to_pfn(__pa(__init_end)),
                                            "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

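/* Free the initrd pages unless "keepinitrd" was given on the command line. */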
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd)
                totalram_pages += free_area(__phys_to_pfn(__pa(start)),
                                            __phys_to_pfn(__pa(end)),
                                            "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif