linux/arch/x86/mm/init.c
#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>	/* for max_low_pfn */

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */
#include <asm/microcode.h>

#include "mm_internal.h"

static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

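/*
 * Lowest pfn that has been added to the direct mapping so far; used as the
 * lower bound when alloc_low_pages() has to fall back to memblock, so that
 * page-table pages are always taken from already-mapped memory.
 */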
static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;

/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
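/*
 * Before bootmem is available, pages come either from the brk-reserved
 * pgt_buf window (pgt_buf_end..pgt_buf_top) or, when that window is exhausted
 * or may not be used, from memblock within the already-mapped range
 * [min_pfn_mapped, max_pfn_mapped). Once bootmem is up, the normal page
 * allocator is used instead. All returned pages are zeroed.
 */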
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
						__GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret;
		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_pages: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE * num, PAGE_SIZE);
		if (!ret)
			panic("alloc_low_pages: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE * num);
		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
		printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
			pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}

/*
 * Need three 4k pages for the initial PMD_SIZE mapping and three 4k pages
 * for the 0-ISA_END_ADDRESS range.
 */
#define INIT_PGT_BUF_SIZE	(6 * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
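/*
 * Reserve the initial page-table buffer from the kernel brk area so that the
 * very first direct-mapping page tables can be allocated before the
 * memblock-based fallback in alloc_low_pages() is usable.
 */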
void __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static void __init init_gbpages(void)
{
#ifdef CONFIG_X86_64
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
#endif
}

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

static int page_size_mask;

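/*
 * Decide which large page sizes (2M and, on 64-bit, 1G) may be used for the
 * direct mapping, and enable the matching CPU features (PSE, PGE) in CR4.
 */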
static void __init probe_page_size_mask(void)
{
	init_gbpages();

#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	if (direct_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (cpu_has_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

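/*
 * Record one [start_pfn, end_pfn) chunk and its page-size mask in mr[],
 * panicking if more than NR_RANGE_MR ranges would be needed.
 */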
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Adjust the page_size_mask for small ranges so that they can use a big
 * page size instead of a small one, if the surrounding area is RAM too.
 */
static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
							 int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}

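/*
 * Split [start, end) into ranges by the largest page size usable for each
 * piece: a possibly unaligned 4k head, a 2M-aligned middle, a 1G-aligned
 * middle (64-bit only), a 2M tail and a 4k tail, then merge adjacent ranges
 * that end up with identical masks.
 *
 * For example, on 64-bit with both 2M and 1G pages enabled, mapping
 * [1MB, 2050MB) would roughly split into:
 *   [1MB,  2MB)    4k pages
 *   [2MB,  1GB)    2M pages
 *   [1GB,  2GB)    1G pages
 *   [2GB,  2050MB) 2M pages
 */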
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;

	limit_pfn = PFN_DOWN(end);

	/* head chunk, in case the start is not big-page aligned */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}

	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}

	/* tail that is not 1G-page aligned */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif

	/* tail that is not 2M-page aligned */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	/* try to merge contiguous ranges with the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
				mr[i].start, mr[i].end - 1,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	return nr_range;
}

struct range pfn_mapped[E820_X_MAX];
int nr_pfn_mapped;

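/*
 * Track which pfn ranges have been added to the direct mapping and keep
 * max_pfn_mapped and max_low_pfn_mapped consistent with that set.
 */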
static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_X_MAX);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}

bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_info("init_memory_mapping: [mem %#010lx-%#010lx]\n",
	       start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}

/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
 * create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 *
 * init_mem_mapping() calls init_range_memory_mapping() with a big range.
 * That range may have holes in the middle or at the ends, and only the
 * RAM parts of it will be mapped by init_range_memory_mapping().
 */
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
		if (start >= end)
			continue;

		/*
		 * If the range overlaps the brk page-table buffer, the
		 * pgt buffer must be allocated from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}

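/*
 * The mapping window grows geometrically: it starts at PMD_SIZE (2M), and
 * each time a pass maps more RAM than all previous passes combined, the step
 * is multiplied by 32, so the window sizes go roughly
 * 2M -> 64M -> 2G -> 64G until the whole range is covered.
 */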
static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * Explain why we shift by 5 and why we don't have to worry about
	 * 'step_size << 5' overflowing:
	 *
	 * The initial mapped size is PMD_SIZE (2M).
	 * We cannot set step_size to PUD_SIZE (1G) yet.
	 * In the worst case, when we cross a 1G boundary and
	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
	 * to map a 1G range with PTEs. Use 5 as the shift for now.
	 *
	 * We don't need to worry about overflow: on 32-bit, when step_size
	 * is 0, round_down() returns 0 for start, and that turns it
	 * into 0x100000000ULL.
	 */
	return step_size << 5;
}

/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) in top-down fashion: the page tables will be
 * allocated at the end of the memory, and we map the memory top-down.
 */
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, start, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;
	unsigned long new_mapped_ram_size;

	/* Xen has a big reserved range near the end of RAM; skip it at first. */
	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = start = real_end;

	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * The memblock_find_in_range() call (in alloc_low_pages()) gets us
	 * a block of RAM from [min_pfn_mapped, max_pfn_mapped) to be used
	 * as new pages for the page tables.
	 */
	while (last_start > map_start) {
		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		new_mapped_ram_size = init_range_memory_mapping(start,
							last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		/* only increase step_size after a big range gets mapped */
		if (new_mapped_ram_size > mapped_ram_size)
			step_size = get_new_step_size(step_size);
		mapped_ram_size += new_mapped_ram_size;
	}

	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}

/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) in bottom-up fashion. Since bottom-up allocation
 * is limited to above the kernel, the page tables will be allocated just
 * above the kernel and we map the memory in [map_start, map_end) bottom-up.
 */
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, new_mapped_ram_size, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;

	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end).
	 * The memblock_find_in_range() call (in alloc_low_pages()) gets us
	 * a block of RAM from [min_pfn_mapped, max_pfn_mapped) to be used
	 * as new pages for the page tables.
	 */
	while (start < map_end) {
		if (map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else
			next = map_end;

		new_mapped_ram_size = init_range_memory_mapping(start, next);
		start = next;

		if (new_mapped_ram_size > mapped_ram_size)
			step_size = get_new_step_size(step_size);
		mapped_ram_size += new_mapped_ram_size;
	}
}

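/*
 * Establish the direct mapping of all usable physical memory: map the ISA
 * range unconditionally first, then map the rest either bottom-up (just
 * above the kernel image) or top-down, depending on the memblock allocation
 * direction.
 */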
void __init init_mem_mapping(void)
{
	unsigned long end;

	probe_page_size_mask();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS);

	/*
	 * If the allocation is in the bottom-up direction, we set up the
	 * direct mapping bottom-up, otherwise we set it up top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * We need two separate calls here because we want to allocate
		 * page tables above the kernel. So we first map
		 * [kernel_end, end) to get memory above the kernel mapped as
		 * soon as possible, and then use page tables allocated above
		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these contain
 * the PCI MMIO resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr < 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

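/*
 * Free a page-aligned range of init memory back to the page allocator, or,
 * with CONFIG_DEBUG_PAGEALLOC, keep it but mark it not-present so that any
 * late access faults. The range is made writable and non-executable before
 * being freed.
 */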
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark it not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
		begin, end - 1);
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read-only above; now that
	 * we are going to free part of it, we need to make it
	 * writable and non-executable first.
	 */
	set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	free_reserved_area((void *)begin, (void *)end, POISON_FREE_INITMEM, what);
#endif
}

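/* Free the kernel's .init sections once boot is complete. */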
void free_initmem(void)
{
	free_init_pages("unused kernel",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_MICROCODE_EARLY
	/*
	 * Remember, initrd memory may contain microcode or other useful
	 * things. Before we lose initrd mem, we need to find a place to
	 * hold them now that normal virtual memory is enabled.
	 */
	save_microcode_in_initrd();
#endif

	/*
	 * end might not be page-aligned, and we cannot align it here: the
	 * decompressor could be confused by an aligned initrd_end. We have
	 * already reserved the trailing partial page in
	 *   - i386_start_kernel()
	 *   - x86_64_start_kernel()
	 *   - relocate_initrd()
	 * so here we can safely use PAGE_ALIGN() to have the partial page freed.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif

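/*
 * Set the maximum pfn of each memory zone (DMA, DMA32, NORMAL and, with
 * CONFIG_HIGHMEM, HIGHMEM) and hand those limits to the core mm via
 * free_area_init_nodes().
 */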
void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}