linux/arch/x86/mm/init_64.c
   1/*
   2 *  linux/arch/x86_64/mm/init.c
   3 *
   4 *  Copyright (C) 1995  Linus Torvalds
   5 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
   6 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
   7 */
   8
   9#include <linux/signal.h>
  10#include <linux/sched.h>
  11#include <linux/kernel.h>
  12#include <linux/errno.h>
  13#include <linux/string.h>
  14#include <linux/types.h>
  15#include <linux/ptrace.h>
  16#include <linux/mman.h>
  17#include <linux/mm.h>
  18#include <linux/swap.h>
  19#include <linux/smp.h>
  20#include <linux/init.h>
  21#include <linux/initrd.h>
  22#include <linux/pagemap.h>
  23#include <linux/bootmem.h>
  24#include <linux/memblock.h>
  25#include <linux/proc_fs.h>
  26#include <linux/pci.h>
  27#include <linux/pfn.h>
  28#include <linux/poison.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/module.h>
  31#include <linux/memory.h>
  32#include <linux/memory_hotplug.h>
  33#include <linux/nmi.h>
  34#include <linux/gfp.h>
  35
  36#include <asm/processor.h>
  37#include <asm/bios_ebda.h>
  38#include <asm/uaccess.h>
  39#include <asm/pgtable.h>
  40#include <asm/pgalloc.h>
  41#include <asm/dma.h>
  42#include <asm/fixmap.h>
  43#include <asm/e820.h>
  44#include <asm/apic.h>
  45#include <asm/tlb.h>
  46#include <asm/mmu_context.h>
  47#include <asm/proto.h>
  48#include <asm/smp.h>
  49#include <asm/sections.h>
  50#include <asm/kdebug.h>
  51#include <asm/numa.h>
  52#include <asm/cacheflush.h>
  53#include <asm/init.h>
  54#include <asm/uv/uv.h>
  55#include <asm/setup.h>
  56
  57static int __init parse_direct_gbpages_off(char *arg)
  58{
  59        direct_gbpages = 0;
  60        return 0;
  61}
  62early_param("nogbpages", parse_direct_gbpages_off);
  63
  64static int __init parse_direct_gbpages_on(char *arg)
  65{
  66        direct_gbpages = 1;
  67        return 0;
  68}
  69early_param("gbpages", parse_direct_gbpages_on);
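/*
 * Note on usage: "gbpages"/"nogbpages" on the kernel command line force
 * 1GB direct mappings on or off.  direct_gbpages itself is consulted by
 * the generic init code (arch/x86/mm/init.c) when it builds the
 * page_size_mask that is later handed to kernel_physical_mapping_init();
 * e.g. booting with "nogbpages" keeps the direct map at 2MB granularity
 * even on CPUs that advertise 1GB pages (pdpe1gb).
 */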
  70
  71/*
   72 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
   73 * in physical space so we can cache the location of the first one and
   74 * move around without checking the pgd every time.
  75 */
  76
  77pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
  78EXPORT_SYMBOL_GPL(__supported_pte_mask);
  79
  80int force_personality32;
  81
  82/*
  83 * noexec32=on|off
   84 * Control the non-executable heap for 32-bit processes.
   85 * To control the stack too, use noexec=off.
  86 *
  87 * on   PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
  88 * off  PROT_READ implies PROT_EXEC
  89 */
  90static int __init nonx32_setup(char *str)
  91{
  92        if (!strcmp(str, "on"))
  93                force_personality32 &= ~READ_IMPLIES_EXEC;
  94        else if (!strcmp(str, "off"))
  95                force_personality32 |= READ_IMPLIES_EXEC;
  96        return 1;
  97}
  98__setup("noexec32=", nonx32_setup);
  99
 100/*
  101 * When memory is added or removed, make sure all the processes' MMs have
  102 * suitable PGD entries in the local PGD-level page.
 103 */
 104void sync_global_pgds(unsigned long start, unsigned long end)
 105{
 106        unsigned long address;
 107
 108        for (address = start; address <= end; address += PGDIR_SIZE) {
 109                const pgd_t *pgd_ref = pgd_offset_k(address);
 110                struct page *page;
 111
 112                if (pgd_none(*pgd_ref))
 113                        continue;
 114
 115                spin_lock(&pgd_lock);
 116                list_for_each_entry(page, &pgd_list, lru) {
 117                        pgd_t *pgd;
 118                        spinlock_t *pgt_lock;
 119
 120                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
  121                        /* the pgt_lock is only needed for Xen */
 122                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 123                        spin_lock(pgt_lock);
 124
 125                        if (pgd_none(*pgd))
 126                                set_pgd(pgd, *pgd_ref);
 127                        else
 128                                BUG_ON(pgd_page_vaddr(*pgd)
 129                                       != pgd_page_vaddr(*pgd_ref));
 130
 131                        spin_unlock(pgt_lock);
 132                }
 133                spin_unlock(&pgd_lock);
 134        }
 135}
 136
 137/*
  138 * NOTE: This function is marked __ref because it calls the __init function
  139 * alloc_bootmem_pages(). Doing so is safe ONLY while after_bootmem == 0.
 140 */
 141static __ref void *spp_getpage(void)
 142{
 143        void *ptr;
 144
 145        if (after_bootmem)
 146                ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
 147        else
 148                ptr = alloc_bootmem_pages(PAGE_SIZE);
 149
 150        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
 151                panic("set_pte_phys: cannot allocate page data %s\n",
 152                        after_bootmem ? "after bootmem" : "");
 153        }
 154
 155        pr_debug("spp_getpage %p\n", ptr);
 156
 157        return ptr;
 158}
 159
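/*
 * The fill_{pud,pmd,pte} helpers below walk one level of the init_mm page
 * tables: if the entry covering vaddr is empty they allocate the next-level
 * table with spp_getpage() and hook it up, then return a pointer to the
 * entry for vaddr at the next level down.
 */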
 160static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
 161{
 162        if (pgd_none(*pgd)) {
 163                pud_t *pud = (pud_t *)spp_getpage();
 164                pgd_populate(&init_mm, pgd, pud);
 165                if (pud != pud_offset(pgd, 0))
 166                        printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
 167                               pud, pud_offset(pgd, 0));
 168        }
 169        return pud_offset(pgd, vaddr);
 170}
 171
 172static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
 173{
 174        if (pud_none(*pud)) {
 175                pmd_t *pmd = (pmd_t *) spp_getpage();
 176                pud_populate(&init_mm, pud, pmd);
 177                if (pmd != pmd_offset(pud, 0))
 178                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
 179                               pmd, pmd_offset(pud, 0));
 180        }
 181        return pmd_offset(pud, vaddr);
 182}
 183
 184static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
 185{
 186        if (pmd_none(*pmd)) {
 187                pte_t *pte = (pte_t *) spp_getpage();
 188                pmd_populate_kernel(&init_mm, pmd, pte);
 189                if (pte != pte_offset_kernel(pmd, 0))
 190                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
 191        }
 192        return pte_offset_kernel(pmd, vaddr);
 193}
 194
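/*
 * Install a single kernel pte for vaddr below the given pud page and flush
 * that one TLB entry.  set_pte_vaddr() below resolves the pud page from the
 * kernel pgd first; the fixmap code is one user of this interface
 * (native_set_fixmap(), for example).
 */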
 195void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
 196{
 197        pud_t *pud;
 198        pmd_t *pmd;
 199        pte_t *pte;
 200
 201        pud = pud_page + pud_index(vaddr);
 202        pmd = fill_pmd(pud, vaddr);
 203        pte = fill_pte(pmd, vaddr);
 204
 205        set_pte(pte, new_pte);
 206
 207        /*
 208         * It's enough to flush this one mapping.
 209         * (PGE mappings get flushed as well)
 210         */
 211        __flush_tlb_one(vaddr);
 212}
 213
 214void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 215{
 216        pgd_t *pgd;
 217        pud_t *pud_page;
 218
 219        pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));
 220
 221        pgd = pgd_offset_k(vaddr);
 222        if (pgd_none(*pgd)) {
 223                printk(KERN_ERR
 224                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
 225                return;
 226        }
 227        pud_page = (pud_t*)pgd_page_vaddr(*pgd);
 228        set_pte_vaddr_pud(pud_page, vaddr, pteval);
 229}
 230
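/*
 * populate_extra_pmd()/populate_extra_pte() make sure the intermediate
 * page-table levels for vaddr exist in the kernel page tables (allocating
 * them via the fill_* helpers as needed) and hand back the pmd/pte entry
 * so the caller can install its own mapping.
 */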
 231pmd_t * __init populate_extra_pmd(unsigned long vaddr)
 232{
 233        pgd_t *pgd;
 234        pud_t *pud;
 235
 236        pgd = pgd_offset_k(vaddr);
 237        pud = fill_pud(pgd, vaddr);
 238        return fill_pmd(pud, vaddr);
 239}
 240
 241pte_t * __init populate_extra_pte(unsigned long vaddr)
 242{
 243        pmd_t *pmd;
 244
 245        pmd = populate_extra_pmd(vaddr);
 246        return fill_pte(pmd, vaddr);
 247}
 248
 249/*
 250 * Create large page table mappings for a range of physical addresses.
 251 */
 252static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
 253                                                pgprot_t prot)
 254{
 255        pgd_t *pgd;
 256        pud_t *pud;
 257        pmd_t *pmd;
 258
 259        BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
 260        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
 261                pgd = pgd_offset_k((unsigned long)__va(phys));
 262                if (pgd_none(*pgd)) {
 263                        pud = (pud_t *) spp_getpage();
 264                        set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
 265                                                _PAGE_USER));
 266                }
 267                pud = pud_offset(pgd, (unsigned long)__va(phys));
 268                if (pud_none(*pud)) {
 269                        pmd = (pmd_t *) spp_getpage();
 270                        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
 271                                                _PAGE_USER));
 272                }
 273                pmd = pmd_offset(pud, phys);
 274                BUG_ON(!pmd_none(*pmd));
 275                set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
 276        }
 277}
 278
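/*
 * Convenience wrappers around __init_extra_mapping() for write-back and
 * uncached large mappings; the SGI UV platform code, for instance, uses
 * these to map its MMR/chipset ranges outside the normal direct mapping.
 */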
 279void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
 280{
 281        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
 282}
 283
 284void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
 285{
 286        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
 287}
 288
 289/*
 290 * The head.S code sets up the kernel high mapping:
 291 *
 292 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 293 *
 294 * phys_addr holds the negative offset to the kernel, which is added
 295 * to the compile time generated pmds. This results in invalid pmds up
 296 * to the point where we hit the physaddr 0 mapping.
 297 *
 298 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 299 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 300 * well, as they are located before _text:
 301 */
 302void __init cleanup_highmap(void)
 303{
 304        unsigned long vaddr = __START_KERNEL_map;
 305        unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
 306        unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
 307        pmd_t *pmd = level2_kernel_pgt;
 308
 309        for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
 310                if (pmd_none(*pmd))
 311                        continue;
 312                if (vaddr < (unsigned long) _text || vaddr > end)
 313                        set_pmd(pmd, __pmd(0));
 314        }
 315}
 316
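/*
 * Early page-table allocator: before after_bootmem is set, pages come from
 * the pgt_buf_end..pgt_buf_top range reserved for early page tables and have
 * to be mapped temporarily with early_memremap(); once the bootmem allocator
 * is gone, plain get_zeroed_page() is used and the map/unmap helpers become
 * no-ops.
 */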
 317static __ref void *alloc_low_page(unsigned long *phys)
 318{
 319        unsigned long pfn = pgt_buf_end++;
 320        void *adr;
 321
 322        if (after_bootmem) {
 323                adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
 324                *phys = __pa(adr);
 325
 326                return adr;
 327        }
 328
 329        if (pfn >= pgt_buf_top)
 330                panic("alloc_low_page: ran out of memory");
 331
 332        adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
 333        clear_page(adr);
 334        *phys  = pfn * PAGE_SIZE;
 335        return adr;
 336}
 337
 338static __ref void *map_low_page(void *virt)
 339{
 340        void *adr;
 341        unsigned long phys, left;
 342
 343        if (after_bootmem)
 344                return virt;
 345
 346        phys = __pa(virt);
 347        left = phys & (PAGE_SIZE - 1);
 348        adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
 349        adr = (void *)(((unsigned long)adr) | left);
 350
 351        return adr;
 352}
 353
 354static __ref void unmap_low_page(void *adr)
 355{
 356        if (after_bootmem)
 357                return;
 358
 359        early_iounmap((void *)((unsigned long)adr & PAGE_MASK), PAGE_SIZE);
 360}
 361
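/*
 * phys_pte_init/phys_pmd_init/phys_pud_init fill one page-table level of the
 * direct mapping for the physical range [addr, end).  Each of them reuses
 * entries that are already present, installs 2MB/1GB leaf entries when
 * page_size_mask allows it, and returns the last physical address that ended
 * up mapped so the caller can track progress.
 */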
 362static unsigned long __meminit
 363phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 364              pgprot_t prot)
 365{
 366        unsigned pages = 0;
 367        unsigned long last_map_addr = end;
 368        int i;
 369
 370        pte_t *pte = pte_page + pte_index(addr);
 371
 372        for(i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {
 373
 374                if (addr >= end) {
 375                        if (!after_bootmem) {
 376                                for(; i < PTRS_PER_PTE; i++, pte++)
 377                                        set_pte(pte, __pte(0));
 378                        }
 379                        break;
 380                }
 381
 382                /*
 383                 * We will re-use the existing mapping.
  384                 * Xen, for example, has some special requirements, like
  385                 * mapping pagetable pages as RO. So assume that whoever
  386                 * pre-set up these mappings knew what they were doing.
 387                 */
 388                if (pte_val(*pte)) {
 389                        pages++;
 390                        continue;
 391                }
 392
 393                if (0)
 394                        printk("   pte=%p addr=%lx pte=%016lx\n",
 395                               pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
 396                pages++;
 397                set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
 398                last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
 399        }
 400
 401        update_page_count(PG_LEVEL_4K, pages);
 402
 403        return last_map_addr;
 404}
 405
 406static unsigned long __meminit
 407phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 408              unsigned long page_size_mask, pgprot_t prot)
 409{
 410        unsigned long pages = 0;
 411        unsigned long last_map_addr = end;
 412
 413        int i = pmd_index(address);
 414
 415        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
 416                unsigned long pte_phys;
 417                pmd_t *pmd = pmd_page + pmd_index(address);
 418                pte_t *pte;
 419                pgprot_t new_prot = prot;
 420
 421                if (address >= end) {
 422                        if (!after_bootmem) {
 423                                for (; i < PTRS_PER_PMD; i++, pmd++)
 424                                        set_pmd(pmd, __pmd(0));
 425                        }
 426                        break;
 427                }
 428
 429                if (pmd_val(*pmd)) {
 430                        if (!pmd_large(*pmd)) {
 431                                spin_lock(&init_mm.page_table_lock);
 432                                pte = map_low_page((pte_t *)pmd_page_vaddr(*pmd));
 433                                last_map_addr = phys_pte_init(pte, address,
 434                                                                end, prot);
 435                                unmap_low_page(pte);
 436                                spin_unlock(&init_mm.page_table_lock);
 437                                continue;
 438                        }
 439                        /*
  440                         * If we are OK with a PG_LEVEL_2M mapping, then we
  441                         * will use the existing mapping.
  442                         *
  443                         * Otherwise, we will split the large page mapping but
  444                         * use the same existing protection bits, except for
  445                         * the large-page bit, so that we don't violate Intel's
  446                         * TLB Application Note (317080), which says that while
  447                         * changing the page sizes, new and old translations
  448                         * should not differ with respect to page frame and
  449                         * attributes.
 450                         */
 451                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
 452                                pages++;
 453                                continue;
 454                        }
 455                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
 456                }
 457
 458                if (page_size_mask & (1<<PG_LEVEL_2M)) {
 459                        pages++;
 460                        spin_lock(&init_mm.page_table_lock);
 461                        set_pte((pte_t *)pmd,
 462                                pfn_pte(address >> PAGE_SHIFT,
 463                                        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
 464                        spin_unlock(&init_mm.page_table_lock);
 465                        last_map_addr = (address & PMD_MASK) + PMD_SIZE;
 466                        continue;
 467                }
 468
 469                pte = alloc_low_page(&pte_phys);
 470                last_map_addr = phys_pte_init(pte, address, end, new_prot);
 471                unmap_low_page(pte);
 472
 473                spin_lock(&init_mm.page_table_lock);
 474                pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
 475                spin_unlock(&init_mm.page_table_lock);
 476        }
 477        update_page_count(PG_LEVEL_2M, pages);
 478        return last_map_addr;
 479}
 480
 481static unsigned long __meminit
 482phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 483                         unsigned long page_size_mask)
 484{
 485        unsigned long pages = 0;
 486        unsigned long last_map_addr = end;
 487        int i = pud_index(addr);
 488
 489        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
 490                unsigned long pmd_phys;
 491                pud_t *pud = pud_page + pud_index(addr);
 492                pmd_t *pmd;
 493                pgprot_t prot = PAGE_KERNEL;
 494
 495                if (addr >= end)
 496                        break;
 497
 498                if (!after_bootmem &&
 499                                !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
 500                        set_pud(pud, __pud(0));
 501                        continue;
 502                }
 503
 504                if (pud_val(*pud)) {
 505                        if (!pud_large(*pud)) {
 506                                pmd = map_low_page(pmd_offset(pud, 0));
 507                                last_map_addr = phys_pmd_init(pmd, addr, end,
 508                                                         page_size_mask, prot);
 509                                unmap_low_page(pmd);
 510                                __flush_tlb_all();
 511                                continue;
 512                        }
 513                        /*
  514                         * If we are OK with a PG_LEVEL_1G mapping, then we
  515                         * will use the existing mapping.
  516                         *
  517                         * Otherwise, we will split the gbpage mapping but use
  518                         * the same existing protection bits, except for the
  519                         * large-page bit, so that we don't violate Intel's
  520                         * TLB Application Note (317080), which says that while
  521                         * changing the page sizes, new and old translations
  522                         * should not differ with respect to page frame and
  523                         * attributes.
 524                         */
 525                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
 526                                pages++;
 527                                continue;
 528                        }
 529                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
 530                }
 531
 532                if (page_size_mask & (1<<PG_LEVEL_1G)) {
 533                        pages++;
 534                        spin_lock(&init_mm.page_table_lock);
 535                        set_pte((pte_t *)pud,
 536                                pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
 537                        spin_unlock(&init_mm.page_table_lock);
 538                        last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
 539                        continue;
 540                }
 541
 542                pmd = alloc_low_page(&pmd_phys);
 543                last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
 544                                              prot);
 545                unmap_low_page(pmd);
 546
 547                spin_lock(&init_mm.page_table_lock);
 548                pud_populate(&init_mm, pud, __va(pmd_phys));
 549                spin_unlock(&init_mm.page_table_lock);
 550        }
 551        __flush_tlb_all();
 552
 553        update_page_count(PG_LEVEL_1G, pages);
 554
 555        return last_map_addr;
 556}
 557
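/*
 * Build the kernel direct mapping (__va()) for the physical range
 * [start, end), one pgd entry at a time, and propagate any newly created
 * pgd entries to every mm via sync_global_pgds().
 */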
 558unsigned long __meminit
 559kernel_physical_mapping_init(unsigned long start,
 560                             unsigned long end,
 561                             unsigned long page_size_mask)
 562{
 563        bool pgd_changed = false;
 564        unsigned long next, last_map_addr = end;
 565        unsigned long addr;
 566
 567        start = (unsigned long)__va(start);
 568        end = (unsigned long)__va(end);
 569        addr = start;
 570
 571        for (; start < end; start = next) {
 572                pgd_t *pgd = pgd_offset_k(start);
 573                unsigned long pud_phys;
 574                pud_t *pud;
 575
 576                next = (start + PGDIR_SIZE) & PGDIR_MASK;
 577                if (next > end)
 578                        next = end;
 579
 580                if (pgd_val(*pgd)) {
 581                        pud = map_low_page((pud_t *)pgd_page_vaddr(*pgd));
 582                        last_map_addr = phys_pud_init(pud, __pa(start),
 583                                                 __pa(end), page_size_mask);
 584                        unmap_low_page(pud);
 585                        continue;
 586                }
 587
 588                pud = alloc_low_page(&pud_phys);
 589                last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
 590                                                 page_size_mask);
 591                unmap_low_page(pud);
 592
 593                spin_lock(&init_mm.page_table_lock);
 594                pgd_populate(&init_mm, pgd, __va(pud_phys));
 595                spin_unlock(&init_mm.page_table_lock);
 596                pgd_changed = true;
 597        }
 598
 599        if (pgd_changed)
 600                sync_global_pgds(addr, end);
 601
 602        __flush_tlb_all();
 603
 604        return last_map_addr;
 605}
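/*
 * Typical caller: init_memory_mapping() in arch/x86/mm/init.c.  Roughly
 * (a simplified sketch, not the verbatim code there):
 *
 *	unsigned long page_size_mask = 0;
 *
 *	if (direct_gbpages)
 *		page_size_mask |= 1 << PG_LEVEL_1G;
 *	if (cpu_has_pse)
 *		page_size_mask |= 1 << PG_LEVEL_2M;
 *
 *	last_map = kernel_physical_mapping_init(start, end, page_size_mask);
 *
 * i.e. the mask selects which leaf sizes the loops above are allowed to
 * use for the direct mapping.
 */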
 606
 607#ifndef CONFIG_NUMA
 608void __init initmem_init(void)
 609{
 610        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
 611}
 612#endif
 613
 614void __init paging_init(void)
 615{
 616        sparse_memory_present_with_active_regions(MAX_NUMNODES);
 617        sparse_init();
 618
 619        /*
  620         * Clear the default state for node 0.
  621         * Note: don't use nodes_clear() here; that really clears the state
  622         *       when NUMA support is not compiled in, and a later
  623         *       node_set_state() will not set it back.
 624         */
 625        node_clear_state(0, N_NORMAL_MEMORY);
 626
 627        zone_sizes_init();
 628}
 629
 630/*
 631 * Memory hotplug specific functions
 632 */
 633#ifdef CONFIG_MEMORY_HOTPLUG
 634/*
 635 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 636 * updating.
 637 */
 638static void  update_end_of_memory_vars(u64 start, u64 size)
 639{
 640        unsigned long end_pfn = PFN_UP(start + size);
 641
 642        if (end_pfn > max_pfn) {
 643                max_pfn = end_pfn;
 644                max_low_pfn = end_pfn;
 645                high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
 646        }
 647}
 648
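/*
 * arch_add_memory() is reached from the generic add_memory() path (for
 * example via the ACPI memory-hotplug driver): it extends the direct
 * mapping for the new range, hands the pages to the core with
 * __add_pages() and then refreshes max_pfn/max_low_pfn/high_memory.
 */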
 649/*
  650 * Memory is always added to the NORMAL zone. This means you will never get
 651 * additional DMA/DMA32 memory.
 652 */
 653int arch_add_memory(int nid, u64 start, u64 size)
 654{
 655        struct pglist_data *pgdat = NODE_DATA(nid);
 656        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 657        unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
 658        unsigned long nr_pages = size >> PAGE_SHIFT;
 659        int ret;
 660
 661        last_mapped_pfn = init_memory_mapping(start, start + size);
 662        if (last_mapped_pfn > max_pfn_mapped)
 663                max_pfn_mapped = last_mapped_pfn;
 664
 665        ret = __add_pages(nid, zone, start_pfn, nr_pages);
 666        WARN_ON_ONCE(ret);
 667
 668        /* update max_pfn, max_low_pfn and high_memory */
 669        update_end_of_memory_vars(start, size);
 670
 671        return ret;
 672}
 673EXPORT_SYMBOL_GPL(arch_add_memory);
 674
 675#endif /* CONFIG_MEMORY_HOTPLUG */
 676
 677static struct kcore_list kcore_vsyscall;
 678
 679void __init mem_init(void)
 680{
 681        long codesize, reservedpages, datasize, initsize;
 682        unsigned long absent_pages;
 683
 684        pci_iommu_alloc();
 685
  686        /* clear_bss() has already cleared empty_zero_page */
 687
 688        reservedpages = 0;
 689
 690        /* this will put all low memory onto the freelists */
 691#ifdef CONFIG_NUMA
 692        totalram_pages = numa_free_all_bootmem();
 693#else
 694        totalram_pages = free_all_bootmem();
 695#endif
 696
 697        absent_pages = absent_pages_in_range(0, max_pfn);
 698        reservedpages = max_pfn - totalram_pages - absent_pages;
 699        after_bootmem = 1;
 700
 701        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
 702        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
 703        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
 704
 705        /* Register memory areas for /proc/kcore */
 706        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
 707                         VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);
 708
 709        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
 710                         "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
 711                nr_free_pages() << (PAGE_SHIFT-10),
 712                max_pfn << (PAGE_SHIFT-10),
 713                codesize >> 10,
 714                absent_pages << (PAGE_SHIFT-10),
 715                reservedpages << (PAGE_SHIFT-10),
 716                datasize >> 10,
 717                initsize >> 10);
 718}
 719
 720#ifdef CONFIG_DEBUG_RODATA
 721const int rodata_test_data = 0xC3;
 722EXPORT_SYMBOL_GPL(rodata_test_data);
 723
 724int kernel_set_to_readonly;
 725
 726void set_kernel_text_rw(void)
 727{
 728        unsigned long start = PFN_ALIGN(_text);
 729        unsigned long end = PFN_ALIGN(__stop___ex_table);
 730
 731        if (!kernel_set_to_readonly)
 732                return;
 733
 734        pr_debug("Set kernel text: %lx - %lx for read write\n",
 735                 start, end);
 736
 737        /*
 738         * Make the kernel identity mapping for text RW. Kernel text
 739         * mapping will always be RO. Refer to the comment in
 740         * static_protections() in pageattr.c
 741         */
 742        set_memory_rw(start, (end - start) >> PAGE_SHIFT);
 743}
 744
 745void set_kernel_text_ro(void)
 746{
 747        unsigned long start = PFN_ALIGN(_text);
 748        unsigned long end = PFN_ALIGN(__stop___ex_table);
 749
 750        if (!kernel_set_to_readonly)
 751                return;
 752
 753        pr_debug("Set kernel text: %lx - %lx for read only\n",
 754                 start, end);
 755
 756        /*
 757         * Set the kernel identity mapping for text RO.
 758         */
 759        set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 760}
 761
 762void mark_rodata_ro(void)
 763{
 764        unsigned long start = PFN_ALIGN(_text);
 765        unsigned long rodata_start =
 766                ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
 767        unsigned long end = (unsigned long) &__end_rodata_hpage_align;
 768        unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
 769        unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);
 770        unsigned long data_start = (unsigned long) &_sdata;
 771
 772        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
 773               (end - start) >> 10);
 774        set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 775
 776        kernel_set_to_readonly = 1;
 777
 778        /*
 779         * The rodata section (but not the kernel text!) should also be
  780         * made non-executable.
 781         */
 782        set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);
 783
 784        rodata_test();
 785
 786#ifdef CONFIG_CPA_DEBUG
 787        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
 788        set_memory_rw(start, (end-start) >> PAGE_SHIFT);
 789
 790        printk(KERN_INFO "Testing CPA: again\n");
 791        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 792#endif
 793
 794        free_init_pages("unused kernel memory",
 795                        (unsigned long) page_address(virt_to_page(text_end)),
 796                        (unsigned long)
 797                                 page_address(virt_to_page(rodata_start)));
 798        free_init_pages("unused kernel memory",
 799                        (unsigned long) page_address(virt_to_page(rodata_end)),
 800                        (unsigned long) page_address(virt_to_page(data_start)));
 801}
 802
 803#endif
 804
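/*
 * kern_addr_valid() reports whether a kernel virtual address is backed by a
 * present (and pfn_valid) mapping, walking the page tables by hand;
 * /proc/kcore uses it to avoid faulting on holes in the kernel address space.
 */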
 805int kern_addr_valid(unsigned long addr)
 806{
 807        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
 808        pgd_t *pgd;
 809        pud_t *pud;
 810        pmd_t *pmd;
 811        pte_t *pte;
 812
 813        if (above != 0 && above != -1UL)
 814                return 0;
 815
 816        pgd = pgd_offset_k(addr);
 817        if (pgd_none(*pgd))
 818                return 0;
 819
 820        pud = pud_offset(pgd, addr);
 821        if (pud_none(*pud))
 822                return 0;
 823
 824        pmd = pmd_offset(pud, addr);
 825        if (pmd_none(*pmd))
 826                return 0;
 827
 828        if (pmd_large(*pmd))
 829                return pfn_valid(pmd_pfn(*pmd));
 830
 831        pte = pte_offset_kernel(pmd, addr);
 832        if (pte_none(*pte))
 833                return 0;
 834
 835        return pfn_valid(pte_pfn(*pte));
 836}
 837
 838/*
  839 * A pseudo VMA to allow ptrace access to the vsyscall page.  This only
  840 * covers the 64-bit vsyscall page now. 32-bit has a real VMA and does
  841 * not need special handling anymore.
 842 */
 843static struct vm_area_struct gate_vma = {
 844        .vm_start       = VSYSCALL_START,
 845        .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
 846        .vm_page_prot   = PAGE_READONLY_EXEC,
 847        .vm_flags       = VM_READ | VM_EXEC
 848};
 849
 850struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 851{
 852#ifdef CONFIG_IA32_EMULATION
 853        if (!mm || mm->context.ia32_compat)
 854                return NULL;
 855#endif
 856        return &gate_vma;
 857}
 858
 859int in_gate_area(struct mm_struct *mm, unsigned long addr)
 860{
 861        struct vm_area_struct *vma = get_gate_vma(mm);
 862
 863        if (!vma)
 864                return 0;
 865
 866        return (addr >= vma->vm_start) && (addr < vma->vm_end);
 867}
 868
 869/*
 870 * Use this when you have no reliable mm, typically from interrupt
 871 * context. It is less reliable than using a task's mm and may give
 872 * false positives.
 873 */
 874int in_gate_area_no_mm(unsigned long addr)
 875{
 876        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
 877}
 878
 879const char *arch_vma_name(struct vm_area_struct *vma)
 880{
 881        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
 882                return "[vdso]";
 883        if (vma == &gate_vma)
 884                return "[vsyscall]";
 885        return NULL;
 886}
 887
 888#ifdef CONFIG_X86_UV
 889unsigned long memory_block_size_bytes(void)
 890{
 891        if (is_uv_system()) {
 892                printk(KERN_INFO "UV: memory block size 2GB\n");
 893                return 2UL * 1024 * 1024 * 1024;
 894        }
 895        return MIN_MEMORY_BLOCK_SIZE;
 896}
 897#endif
 898
 899#ifdef CONFIG_SPARSEMEM_VMEMMAP
 900/*
 901 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 902 */
 903static long __meminitdata addr_start, addr_end;
 904static void __meminitdata *p_start, *p_end;
 905static int __meminitdata node_start;
 906
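/*
 * Map the struct page array (vmemmap) for the section starting at
 * start_page.  With PSE available this uses 2MB PMD mappings backed by
 * vmemmap_alloc_block_buf(); otherwise it falls back to 4k ptes.  The
 * addr_start/p_start bookkeeping above only exists to coalesce the
 * "PMD -> ..." boot messages for contiguous blocks.
 */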
 907int __meminit
 908vmemmap_populate(struct page *start_page, unsigned long size, int node)
 909{
 910        unsigned long addr = (unsigned long)start_page;
 911        unsigned long end = (unsigned long)(start_page + size);
 912        unsigned long next;
 913        pgd_t *pgd;
 914        pud_t *pud;
 915        pmd_t *pmd;
 916
 917        for (; addr < end; addr = next) {
 918                void *p = NULL;
 919
 920                pgd = vmemmap_pgd_populate(addr, node);
 921                if (!pgd)
 922                        return -ENOMEM;
 923
 924                pud = vmemmap_pud_populate(pgd, addr, node);
 925                if (!pud)
 926                        return -ENOMEM;
 927
 928                if (!cpu_has_pse) {
 929                        next = (addr + PAGE_SIZE) & PAGE_MASK;
 930                        pmd = vmemmap_pmd_populate(pud, addr, node);
 931
 932                        if (!pmd)
 933                                return -ENOMEM;
 934
 935                        p = vmemmap_pte_populate(pmd, addr, node);
 936
 937                        if (!p)
 938                                return -ENOMEM;
 939
 940                        addr_end = addr + PAGE_SIZE;
 941                        p_end = p + PAGE_SIZE;
 942                } else {
 943                        next = pmd_addr_end(addr, end);
 944
 945                        pmd = pmd_offset(pud, addr);
 946                        if (pmd_none(*pmd)) {
 947                                pte_t entry;
 948
 949                                p = vmemmap_alloc_block_buf(PMD_SIZE, node);
 950                                if (!p)
 951                                        return -ENOMEM;
 952
 953                                entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
 954                                                PAGE_KERNEL_LARGE);
 955                                set_pmd(pmd, __pmd(pte_val(entry)));
 956
 957                                /* check to see if we have contiguous blocks */
 958                                if (p_end != p || node_start != node) {
 959                                        if (p_start)
 960                                                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
 961                                                       addr_start, addr_end-1, p_start, p_end-1, node_start);
 962                                        addr_start = addr;
 963                                        node_start = node;
 964                                        p_start = p;
 965                                }
 966
 967                                addr_end = addr + PMD_SIZE;
 968                                p_end = p + PMD_SIZE;
 969                        } else
 970                                vmemmap_verify((pte_t *)pmd, node, addr, next);
 971                }
 972
 973        }
 974        sync_global_pgds((unsigned long)start_page, end);
 975        return 0;
 976}
 977
 978void __meminit vmemmap_populate_print_last(void)
 979{
 980        if (p_start) {
 981                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
 982                        addr_start, addr_end-1, p_start, p_end-1, node_start);
 983                p_start = NULL;
 984                p_end = NULL;
 985                node_start = 0;
 986        }
 987}
 988#endif
 989