linux/arch/x86/mm/init_64.c
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

#include "mm_internal.h"

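/*
 * Identity-mapping helpers: ident_pmd_init() and ident_pud_init() fill in
 * 2MB-granular PMD entries (and the PUD entries pointing at them) so that
 * virtual address == physical address across [addr, end).  Entries that
 * are already present are left untouched.  They are driven by
 * kernel_ident_mapping_init() below.
 */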
static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
                           unsigned long addr, unsigned long end)
{
        addr &= PMD_MASK;
        for (; addr < end; addr += PMD_SIZE) {
                pmd_t *pmd = pmd_page + pmd_index(addr);

                if (!pmd_present(*pmd))
                        set_pmd(pmd, __pmd(addr | pmd_flag));
        }
}

static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
                          unsigned long addr, unsigned long end)
{
        unsigned long next;

        for (; addr < end; addr = next) {
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;

                next = (addr & PUD_MASK) + PUD_SIZE;
                if (next > end)
                        next = end;

                if (pud_present(*pud)) {
                        pmd = pmd_offset(pud, 0);
                        ident_pmd_init(info->pmd_flag, pmd, addr, next);
                        continue;
                }
                pmd = (pmd_t *)info->alloc_pgt_page(info->context);
                if (!pmd)
                        return -ENOMEM;
                ident_pmd_init(info->pmd_flag, pmd, addr, next);
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }

        return 0;
}

int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
                              unsigned long addr, unsigned long end)
{
        unsigned long next;
        int result;
        int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;

        for (; addr < end; addr = next) {
                pgd_t *pgd = pgd_page + pgd_index(addr) + off;
                pud_t *pud;

                next = (addr & PGDIR_MASK) + PGDIR_SIZE;
                if (next > end)
                        next = end;

                if (pgd_present(*pgd)) {
                        pud = pud_offset(pgd, 0);
                        result = ident_pud_init(info, pud, addr, next);
                        if (result)
                                return result;
                        continue;
                }

                pud = (pud_t *)info->alloc_pgt_page(info->context);
                if (!pud)
                        return -ENOMEM;
                result = ident_pud_init(info, pud, addr, next);
                if (result)
                        return result;
                set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
        }

        return 0;
}
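
/*
 * An illustrative (not authoritative) caller sketch: code that needs a
 * standalone identity mapping, such as the kexec path, supplies its own
 * page-table allocator through struct x86_mapping_info and walks the
 * physical ranges it cares about:
 *
 *	struct x86_mapping_info info = {
 *		.alloc_pgt_page	= my_alloc_pgt_page,	// hypothetical helper
 *		.context	= &my_pages,		// hypothetical state
 *		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
 *	};
 *	if (kernel_ident_mapping_init(&info, pgd, mstart, mend))
 *		goto err;	// -ENOMEM from the allocator
 *
 * (Sketch only; the real callers live elsewhere in the tree.)
 */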

static int __init parse_direct_gbpages_off(char *arg)
{
        direct_gbpages = 0;
        return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
        direct_gbpages = 1;
        return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical memory, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on   PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off  PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
        if (!strcmp(str, "on"))
                force_personality32 &= ~READ_IMPLIES_EXEC;
        else if (!strcmp(str, "off"))
                force_personality32 |= READ_IMPLIES_EXEC;
        return 1;
}
__setup("noexec32=", nonx32_setup);

/*
 * When memory is added or removed, make sure all the processes' mms have
 * suitable PGD entries in their local PGD-level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
        unsigned long address;

        for (address = start; address <= end; address += PGDIR_SIZE) {
                const pgd_t *pgd_ref = pgd_offset_k(address);
                struct page *page;

                if (pgd_none(*pgd_ref))
                        continue;

                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        spinlock_t *pgt_lock;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        /* the pgt_lock is only needed for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
                        spin_lock(pgt_lock);

                        if (pgd_none(*pgd))
                                set_pgd(pgd, *pgd_ref);
                        else
                                BUG_ON(pgd_page_vaddr(*pgd)
                                       != pgd_page_vaddr(*pgd_ref));

                        spin_unlock(pgt_lock);
                }
                spin_unlock(&pgd_lock);
        }
}
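
/*
 * Both kernel_physical_mapping_init() and vmemmap_populate() below call
 * sync_global_pgds() after extending the kernel mapping, so that every
 * pgd on the pgd_list picks up the new PGD-level entries.
 */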

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). Doing so is safe ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}

static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
        if (pgd_none(*pgd)) {
                pud_t *pud = (pud_t *)spp_getpage();
                pgd_populate(&init_mm, pgd, pud);
                if (pud != pud_offset(pgd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
                               pud, pud_offset(pgd, 0));
        }
        return pud_offset(pgd, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
        if (pud_none(*pud)) {
                pmd_t *pmd = (pmd_t *) spp_getpage();
                pud_populate(&init_mm, pud, pmd);
                if (pmd != pmd_offset(pud, 0))
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                               pmd, pmd_offset(pud, 0));
        }
        return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) spp_getpage();
                pmd_populate_kernel(&init_mm, pmd, pte);
                if (pte != pte_offset_kernel(pmd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
        }
        return pte_offset_kernel(pmd, vaddr);
}
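
/*
 * fill_pud()/fill_pmd()/fill_pte() lazily allocate any missing intermediate
 * table via spp_getpage() and return the entry for vaddr at the next level
 * down; the "PAGETABLE BUG" printks fire if someone else populated the
 * entry in the meantime.
 */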

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pud = pud_page + pud_index(vaddr);
        pmd = fill_pmd(pud, vaddr);
        pte = fill_pte(pmd, vaddr);

        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd;
        pud_t *pud_page;

        pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud_page = (pud_t*)pgd_page_vaddr(*pgd);
        set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        pgd_t *pgd;
        pud_t *pud;

        pgd = pgd_offset_k(vaddr);
        pud = fill_pud(pgd, vaddr);
        return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
                                                pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
                pgd = pgd_offset_k((unsigned long)__va(phys));
                if (pgd_none(*pgd)) {
                        pud = (pud_t *) spp_getpage();
                        set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pud = pud_offset(pgd, (unsigned long)__va(phys));
                if (pud_none(*pud)) {
                        pmd = (pmd_t *) spp_getpage();
                        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pmd = pmd_offset(pud, phys);
                BUG_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
        }
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
        unsigned long vaddr = __START_KERNEL_map;
        unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
        unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;

        /*
         * Native path, max_pfn_mapped is not set yet.
         * Xen has valid max_pfn_mapped set in
         *      arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
         */
        if (max_pfn_mapped)
                vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

        for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }
}
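
/*
 * Illustrative arithmetic (assuming a KERNEL_IMAGE_SIZE of 512MB): the
 * level2_kernel_pgt then covers 256 pmds of 2MB each, and every pmd outside
 * [_text, roundup(_brk_end, PMD_SIZE)) is cleared above so that stray
 * mappings below _text or beyond the brk cannot be used.
 */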

static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
              pgprot_t prot)
{
        unsigned long pages = 0, next;
        unsigned long last_map_addr = end;
        int i;

        pte_t *pte = pte_page + pte_index(addr);

        for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
                next = (addr & PAGE_MASK) + PAGE_SIZE;
                if (addr >= end) {
                        if (!after_bootmem &&
                            !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
                            !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
                                set_pte(pte, __pte(0));
                        continue;
                }

                /*
                 * We will reuse the existing mapping. Xen, for example, has
                 * special requirements, such as mapping pagetable pages as
                 * RO. So assume that whoever pre-set up these mappings knew
                 * what they were doing.
                 */
                if (pte_val(*pte)) {
                        if (!after_bootmem)
                                pages++;
                        continue;
                }

                if (0)
                        printk("   pte=%p addr=%lx pte=%016lx\n",
                               pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
                pages++;
                set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
                last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
        }

        update_page_count(PG_LEVEL_4K, pages);

        return last_map_addr;
}
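
/*
 * Each phys_*_init() helper returns last_map_addr, the end of the highest
 * range it actually mapped; kernel_physical_mapping_init() propagates that
 * value back to its caller.
 */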

static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
              unsigned long page_size_mask, pgprot_t prot)
{
        unsigned long pages = 0, next;
        unsigned long last_map_addr = end;

        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address = next) {
                pmd_t *pmd = pmd_page + pmd_index(address);
                pte_t *pte;
                pgprot_t new_prot = prot;

                next = (address & PMD_MASK) + PMD_SIZE;
                if (address >= end) {
                        if (!after_bootmem &&
                            !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
                            !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
                                set_pmd(pmd, __pmd(0));
                        continue;
                }

                if (pmd_val(*pmd)) {
                        if (!pmd_large(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                pte = (pte_t *)pmd_page_vaddr(*pmd);
                                last_map_addr = phys_pte_init(pte, address,
                                                                end, prot);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_2M mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the large page mapping but
                         * use the same existing protection bits except for
                         * large page, so that we don't violate Intel's TLB
                         * Application note (317080) which says, while changing
                         * the page sizes, new and old translations should
                         * not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
                                if (!after_bootmem)
                                        pages++;
                                last_map_addr = next;
                                continue;
                        }
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }

                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pmd,
                                pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
                                        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = next;
                        continue;
                }

                pte = alloc_low_page();
                last_map_addr = phys_pte_init(pte, address, end, new_prot);

                spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel(&init_mm, pmd, pte);
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
        return last_map_addr;
}

static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                         unsigned long page_size_mask)
{
        unsigned long pages = 0, next;
        unsigned long last_map_addr = end;
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = next) {
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;
                pgprot_t prot = PAGE_KERNEL;

                next = (addr & PUD_MASK) + PUD_SIZE;
                if (addr >= end) {
                        if (!after_bootmem &&
                            !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
                            !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
                                set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        if (!pud_large(*pud)) {
                                pmd = pmd_offset(pud, 0);
                                last_map_addr = phys_pmd_init(pmd, addr, end,
                                                         page_size_mask, prot);
                                __flush_tlb_all();
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_1G mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the gbpage mapping but use
                         * the same existing protection bits except for large
                         * page, so that we don't violate Intel's TLB
                         * Application note (317080) which says, while changing
                         * the page sizes, new and old translations should
                         * not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
                                if (!after_bootmem)
                                        pages++;
                                last_map_addr = next;
                                continue;
                        }
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }

                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pud,
                                pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
                                        PAGE_KERNEL_LARGE));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = next;
                        continue;
                }

                pmd = alloc_low_page();
                last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
                                              prot);

                spin_lock(&init_mm.page_table_lock);
                pud_populate(&init_mm, pud, pmd);
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();

        update_page_count(PG_LEVEL_1G, pages);

        return last_map_addr;
}

unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{
        bool pgd_changed = false;
        unsigned long next, last_map_addr = end;
        unsigned long addr;

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);
        addr = start;

        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
                pud_t *pud;

                next = (start & PGDIR_MASK) + PGDIR_SIZE;

                if (pgd_val(*pgd)) {
                        pud = (pud_t *)pgd_page_vaddr(*pgd);
                        last_map_addr = phys_pud_init(pud, __pa(start),
                                                 __pa(end), page_size_mask);
                        continue;
                }

                pud = alloc_low_page();
                last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
                                                 page_size_mask);

                spin_lock(&init_mm.page_table_lock);
                pgd_populate(&init_mm, pgd, pud);
                spin_unlock(&init_mm.page_table_lock);
                pgd_changed = true;
        }

        if (pgd_changed)
                sync_global_pgds(addr, end - 1);

        __flush_tlb_all();

        return last_map_addr;
}
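
/*
 * Note: callers pass *physical* start/end here (the function switches to
 * virtual addresses itself via __va()), plus a page_size_mask built from
 * the (1 << PG_LEVEL_2M) and/or (1 << PG_LEVEL_1G) bits describing which
 * large page sizes may be used. In this tree the caller is
 * init_memory_mapping().
 */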

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
}
#endif

void __init paging_init(void)
{
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        /*
         * Clear the default setting for node 0.
         * Note: don't use nodes_clear here; that really clears the state
         *       when NUMA support is not compiled in, and a later
         *       node_set_state will not set it back.
         */
        node_clear_state(0, N_MEMORY);
        if (N_MEMORY != N_NORMAL_MEMORY)
                node_clear_state(0, N_NORMAL_MEMORY);

        zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory
 * need updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
        unsigned long end_pfn = PFN_UP(start + size);

        if (end_pfn > max_pfn) {
                max_pfn = end_pfn;
                max_low_pfn = end_pfn;
                high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
        }
}

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        init_memory_mapping(start, start + size);

        ret = __add_pages(nid, zone, start_pfn, nr_pages);
        WARN_ON_ONCE(ret);

        /* update max_pfn, max_low_pfn and high_memory */
        update_end_of_memory_vars(start, size);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);
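
/*
 * arch_add_memory() is the arch hook behind the generic add_memory() path:
 * it extends the direct mapping for the new range, then hands the pfn
 * range to the core with __add_pages(). A probe of a (hypothetical) 128MB
 * block at 0x100000000 would thus end up here as
 * arch_add_memory(nid, 0x100000000, 128 << 20).
 */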

#define PAGE_INUSE 0xFD

static void __meminit free_pagetable(struct page *page, int order)
{
        unsigned long magic;
        unsigned int nr_pages = 1 << order;

        /* bootmem page has reserved flag */
        if (PageReserved(page)) {
                __ClearPageReserved(page);

                magic = (unsigned long)page->lru.next;
                if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
                        while (nr_pages--)
                                put_page_bootmem(page++);
                } else
                        while (nr_pages--)
                                free_reserved_page(page++);
        } else
                free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
        pte_t *pte;
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte = pte_start + i;
                if (pte_val(*pte))
                        return;
        }

        /* free a pte table */
        free_pagetable(pmd_page(*pmd), 0);
        spin_lock(&init_mm.page_table_lock);
        pmd_clear(pmd);
        spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
        pmd_t *pmd;
        int i;

        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd = pmd_start + i;
                if (pmd_val(*pmd))
                        return;
        }

        /* free a pmd table */
        free_pagetable(pud_page(*pud), 0);
        spin_lock(&init_mm.page_table_lock);
        pud_clear(pud);
        spin_unlock(&init_mm.page_table_lock);
}

/* Return true if pgd is changed, otherwise return false. */
static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
{
        pud_t *pud;
        int i;

        for (i = 0; i < PTRS_PER_PUD; i++) {
                pud = pud_start + i;
                if (pud_val(*pud))
                        return false;
        }

        /* free a pud table */
        free_pagetable(pgd_page(*pgd), 0);
        spin_lock(&init_mm.page_table_lock);
        pgd_clear(pgd);
        spin_unlock(&init_mm.page_table_lock);

        return true;
}
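
/*
 * The free_{pte,pmd,pud}_table() helpers above share one pattern: only when
 * every entry in a lower-level table is clear do they free the table page
 * and clear the entry that pointed at it, taking init_mm.page_table_lock
 * around the clear.
 */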

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                 bool direct)
{
        unsigned long next, pages = 0;
        pte_t *pte;
        void *page_addr;
        phys_addr_t phys_addr;

        pte = pte_start + pte_index(addr);
        for (; addr < end; addr = next, pte++) {
                next = (addr + PAGE_SIZE) & PAGE_MASK;
                if (next > end)
                        next = end;

                if (!pte_present(*pte))
                        continue;

                /*
                 * We mapped [0,1G) memory as identity mapping when
                 * initializing, in arch/x86/kernel/head_64.S. These
                 * pagetables cannot be removed.
                 */
                phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
                if (phys_addr < (phys_addr_t)0x40000000)
                        return;

                if (IS_ALIGNED(addr, PAGE_SIZE) &&
                    IS_ALIGNED(next, PAGE_SIZE)) {
                        /*
                         * Do not free direct mapping pages since they were
                         * freed when offlining, or are simply not in use.
                         */
                        if (!direct)
                                free_pagetable(pte_page(*pte), 0);

                        spin_lock(&init_mm.page_table_lock);
                        pte_clear(&init_mm, addr, pte);
                        spin_unlock(&init_mm.page_table_lock);

                        /* For non-direct mapping, pages means nothing. */
                        pages++;
                } else {
                        /*
                         * If we are here, we are freeing vmemmap pages since
                         * direct mapped memory ranges to be freed are aligned.
                         *
                         * If we are not removing the whole page, it means
                         * other page structs in this page are being used and
                         * we cannot remove them. So fill the unused page
                         * structs with 0xFD, and remove the page when it is
                         * wholly filled with 0xFD.
                         */
                        memset((void *)addr, PAGE_INUSE, next - addr);

                        page_addr = page_address(pte_page(*pte));
                        if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
                                free_pagetable(pte_page(*pte), 0);

                                spin_lock(&init_mm.page_table_lock);
                                pte_clear(&init_mm, addr, pte);
                                spin_unlock(&init_mm.page_table_lock);
                        }
                }
        }

        /* free_pte_table() is called from remove_pmd_table(). */
        flush_tlb_all();
        if (direct)
                update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                 bool direct)
{
        unsigned long next, pages = 0;
        pte_t *pte_base;
        pmd_t *pmd;
        void *page_addr;

        pmd = pmd_start + pmd_index(addr);
        for (; addr < end; addr = next, pmd++) {
                next = pmd_addr_end(addr, end);

                if (!pmd_present(*pmd))
                        continue;

                if (pmd_large(*pmd)) {
                        if (IS_ALIGNED(addr, PMD_SIZE) &&
                            IS_ALIGNED(next, PMD_SIZE)) {
                                if (!direct)
                                        free_pagetable(pmd_page(*pmd),
                                                       get_order(PMD_SIZE));

                                spin_lock(&init_mm.page_table_lock);
                                pmd_clear(pmd);
                                spin_unlock(&init_mm.page_table_lock);
                                pages++;
                        } else {
                                /* If here, we are freeing vmemmap pages. */
                                memset((void *)addr, PAGE_INUSE, next - addr);

                                page_addr = page_address(pmd_page(*pmd));
                                if (!memchr_inv(page_addr, PAGE_INUSE,
                                                PMD_SIZE)) {
                                        free_pagetable(pmd_page(*pmd),
                                                       get_order(PMD_SIZE));

                                        spin_lock(&init_mm.page_table_lock);
                                        pmd_clear(pmd);
                                        spin_unlock(&init_mm.page_table_lock);
                                }
                        }

                        continue;
                }

                pte_base = (pte_t *)pmd_page_vaddr(*pmd);
                remove_pte_table(pte_base, addr, next, direct);
                free_pte_table(pte_base, pmd);
        }

        /* free_pmd_table() is called from remove_pud_table(). */
        if (direct)
                update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                 bool direct)
{
        unsigned long next, pages = 0;
        pmd_t *pmd_base;
        pud_t *pud;
        void *page_addr;

        pud = pud_start + pud_index(addr);
        for (; addr < end; addr = next, pud++) {
                next = pud_addr_end(addr, end);

                if (!pud_present(*pud))
                        continue;

                if (pud_large(*pud)) {
                        if (IS_ALIGNED(addr, PUD_SIZE) &&
                            IS_ALIGNED(next, PUD_SIZE)) {
                                if (!direct)
                                        free_pagetable(pud_page(*pud),
                                                       get_order(PUD_SIZE));

                                spin_lock(&init_mm.page_table_lock);
                                pud_clear(pud);
                                spin_unlock(&init_mm.page_table_lock);
                                pages++;
                        } else {
                                /* If here, we are freeing vmemmap pages. */
                                memset((void *)addr, PAGE_INUSE, next - addr);

                                page_addr = page_address(pud_page(*pud));
                                if (!memchr_inv(page_addr, PAGE_INUSE,
                                                PUD_SIZE)) {
                                        free_pagetable(pud_page(*pud),
                                                       get_order(PUD_SIZE));

                                        spin_lock(&init_mm.page_table_lock);
                                        pud_clear(pud);
                                        spin_unlock(&init_mm.page_table_lock);
                                }
                        }

                        continue;
                }

                pmd_base = (pmd_t *)pud_page_vaddr(*pud);
                remove_pmd_table(pmd_base, addr, next, direct);
                free_pmd_table(pmd_base, pud);
        }

        if (direct)
                update_page_count(PG_LEVEL_1G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        bool pgd_changed = false;

        for (; start < end; start = next) {
                next = pgd_addr_end(start, end);

                pgd = pgd_offset_k(start);
                if (!pgd_present(*pgd))
                        continue;

                pud = (pud_t *)pgd_page_vaddr(*pgd);
                remove_pud_table(pud, start, next, direct);
                if (free_pud_table(pud, pgd))
                        pgd_changed = true;
        }

        if (pgd_changed)
                sync_global_pgds(start, end - 1);

        flush_tlb_all();
}
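
/*
 * remove_pagetable() serves two distinct tear-down paths: vmemmap_free()
 * passes direct=false (the struct-page backing store itself is freed),
 * while kernel_physical_mapping_remove() passes direct=true (only the
 * direct-map translations go away; the underlying pages were handled by
 * the offlining code).
 */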

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
        remove_pagetable(start, end, false);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        remove_pagetable(start, end, true);
}

int __ref arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;
        int ret;

        zone = page_zone(pfn_to_page(start_pfn));
        kernel_physical_mapping_remove(start, start + size);
        ret = __remove_pages(zone, start_pfn, nr_pages);
        WARN_ON_ONCE(ret);

        return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
        int i;

        for_each_online_node(i)
                register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
        pci_iommu_alloc();

        /* clear_bss() has already cleared the empty_zero_page */

        register_page_bootmem_info();

        /* this will put all memory onto the freelists */
        free_all_bootmem();
        after_bootmem = 1;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                         VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);

        mem_init_print_info(NULL);
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = PFN_ALIGN(__stop___ex_table);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, end);

        /*
         * Make the kernel identity mapping for text RW. Kernel text
         * mapping will always be RO. Refer to the comment in
         * static_protections() in pageattr.c
         */
        set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = PFN_ALIGN(__stop___ex_table);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, end);

        /*
         * Set the kernel identity mapping for text RO.
         */
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long rodata_start = PFN_ALIGN(__start_rodata);
        unsigned long end = (unsigned long) &__end_rodata_hpage_align;
        unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
        unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
        unsigned long all_end = PFN_ALIGN(&_end);

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        kernel_set_to_readonly = 1;

        /*
         * The rodata/data/bss/brk section (but not the kernel text!)
         * should also be not-executable.
         */
        set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);

        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

        free_init_pages("unused kernel",
                        (unsigned long) __va(__pa_symbol(text_end)),
                        (unsigned long) __va(__pa_symbol(rodata_start)));
        free_init_pages("unused kernel",
                        (unsigned long) __va(__pa_symbol(rodata_end)),
                        (unsigned long) __va(__pa_symbol(_sdata)));
}

#endif

int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_large(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access to the vsyscall page. This only
 * covers the 64-bit vsyscall page now. 32-bit has a real VMA now and does
 * not need special handling anymore.
 */
static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_START,
        .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_IA32_EMULATION
        if (!mm || mm->context.ia32_compat)
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(mm);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        if (vma == &gate_vma)
                return "[vsyscall]";
        return NULL;
}

#ifdef CONFIG_X86_UV
unsigned long memory_block_size_bytes(void)
{
        if (is_uv_system()) {
                printk(KERN_INFO "UV: memory block size 2GB\n");
                return 2UL * 1024 * 1024 * 1024;
        }
        return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

static int __meminit vmemmap_populate_hugepages(unsigned long start,
                                                unsigned long end, int node)
{
        unsigned long addr;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        for (addr = start; addr < end; addr = next) {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (p) {
                                pte_t entry;

                                entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                                PAGE_KERNEL_LARGE);
                                set_pmd(pmd, __pmd(pte_val(entry)));

                                /* check to see if we have contiguous blocks */
                                if (p_end != p || node_start != node) {
                                        if (p_start)
                                                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                                       addr_start, addr_end-1, p_start, p_end-1, node_start);
                                        addr_start = addr;
                                        node_start = node;
                                        p_start = p;
                                }

                                addr_end = addr + PMD_SIZE;
                                p_end = p + PMD_SIZE;
                                continue;
                        }
                } else if (pmd_large(*pmd)) {
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
                        continue;
                }
                pr_warn_once("vmemmap: falling back to regular page backing\n");
                if (vmemmap_populate_basepages(addr, next, node))
                        return -ENOMEM;
        }
        return 0;
}
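
/*
 * In other words: each 2MB-aligned chunk of the vmemmap gets a single PMD
 * mapping backed by one PMD_SIZE allocation when vmemmap_alloc_block_buf()
 * succeeds; only on allocation failure does a chunk fall back to 4k base
 * pages via vmemmap_populate_basepages().
 */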

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        int err;

        if (cpu_has_pse)
                err = vmemmap_populate_hugepages(start, end, node);
        else
                err = vmemmap_populate_basepages(start, end, node);
        if (!err)
                sync_global_pgds(start, end - 1);
        return err;
}

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + size);
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned int nr_pages;
        struct page *page;

        for (; addr < end; addr = next) {
                pte_t *pte = NULL;

                pgd = pgd_offset_k(addr);
                if (pgd_none(*pgd)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }
                get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

                pud = pud_offset(pgd, addr);
                if (pud_none(*pud)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }
                get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

                if (!cpu_has_pse) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd))
                                continue;
                        get_page_bootmem(section_nr, pmd_page(*pmd),
                                         MIX_SECTION_INFO);

                        pte = pte_offset_kernel(pmd, addr);
                        if (pte_none(*pte))
                                continue;
                        get_page_bootmem(section_nr, pte_page(*pte),
                                         SECTION_INFO);
                } else {
                        next = pmd_addr_end(addr, end);

                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd))
                                continue;

                        nr_pages = 1 << (get_order(PMD_SIZE));
                        page = pmd_page(*pmd);
                        while (nr_pages--)
                                get_page_bootmem(section_nr, page++,
                                                 SECTION_INFO);
                }
        }
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
        if (p_start) {
                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                        addr_start, addr_end-1, p_start, p_end-1, node_start);
                p_start = NULL;
                p_end = NULL;
                node_start = 0;
        }
}
#endif