linux/arch/x86/mm/init_64.c
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>
#include <linux/gfp.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

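/*
 * Boot-time control of 1GB direct mappings: passing "nogbpages" on the
 * kernel command line forces direct_gbpages off, while "gbpages" forces
 * it on.
 */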
static int __init parse_direct_gbpages_off(char *arg)
{
        direct_gbpages = 0;
        return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
        direct_gbpages = 1;
        return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

/*
 * NOTE: pagetable_init allocates all the fixmap page tables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on   PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off  PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
        if (!strcmp(str, "on"))
                force_personality32 &= ~READ_IMPLIES_EXEC;
        else if (!strcmp(str, "off"))
                force_personality32 |= READ_IMPLIES_EXEC;
        return 1;
}
__setup("noexec32=", nonx32_setup);

/*
 * When memory is added or removed, make sure all the processes' MMs have
 * suitable PGD entries in the local PGD-level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
        unsigned long address;

        for (address = start; address <= end; address += PGDIR_SIZE) {
                const pgd_t *pgd_ref = pgd_offset_k(address);
                struct page *page;

                if (pgd_none(*pgd_ref))
                        continue;

                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        spinlock_t *pgt_lock;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        /* the pgt_lock is only needed for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
                        spin_lock(pgt_lock);

                        if (pgd_none(*pgd))
                                set_pgd(pgd, *pgd_ref);
                        else
                                BUG_ON(pgd_page_vaddr(*pgd)
                                       != pgd_page_vaddr(*pgd_ref));

                        spin_unlock(pgt_lock);
                }
                spin_unlock(&pgd_lock);
        }
}

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). That is safe ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}

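/*
 * The fill_*() helpers below walk one level of the kernel page tables,
 * allocating the next-level table via spp_getpage() when the entry is
 * still empty, and return a pointer to the entry for vaddr.
 */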
static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
        if (pgd_none(*pgd)) {
                pud_t *pud = (pud_t *)spp_getpage();
                pgd_populate(&init_mm, pgd, pud);
                if (pud != pud_offset(pgd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
                               pud, pud_offset(pgd, 0));
        }
        return pud_offset(pgd, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
        if (pud_none(*pud)) {
                pmd_t *pmd = (pmd_t *) spp_getpage();
                pud_populate(&init_mm, pud, pmd);
                if (pmd != pmd_offset(pud, 0))
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                               pmd, pmd_offset(pud, 0));
        }
        return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) spp_getpage();
                pmd_populate_kernel(&init_mm, pmd, pte);
                if (pte != pte_offset_kernel(pmd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
        }
        return pte_offset_kernel(pmd, vaddr);
}

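/*
 * Install a single kernel PTE for vaddr underneath the given PUD page,
 * creating the intermediate PMD and PTE tables as needed, then flush the
 * one affected TLB entry.  set_pte_vaddr() below looks the PUD page up
 * from the kernel PGD first.
 */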
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pud = pud_page + pud_index(vaddr);
        pmd = fill_pmd(pud, vaddr);
        pte = fill_pte(pmd, vaddr);

        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd;
        pud_t *pud_page;

        pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud_page = (pud_t*)pgd_page_vaddr(*pgd);
        set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

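/*
 * Make sure the kernel page table hierarchy for vaddr exists down to the
 * PMD or PTE level, respectively, and return the corresponding entry,
 * allocating intermediate tables as needed.
 */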
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        pgd_t *pgd;
        pud_t *pud;

        pgd = pgd_offset_k(vaddr);
        pud = fill_pud(pgd, vaddr);
        return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
                                                pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
                pgd = pgd_offset_k((unsigned long)__va(phys));
                if (pgd_none(*pgd)) {
                        pud = (pud_t *) spp_getpage();
                        set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pud = pud_offset(pgd, (unsigned long)__va(phys));
                if (pud_none(*pud)) {
                        pmd = (pmd_t *) spp_getpage();
                        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pmd = pmd_offset(pud, phys);
                BUG_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
        }
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
        unsigned long vaddr = __START_KERNEL_map;
        unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
        unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;

        for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }
}

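/*
 * Early page table allocation helpers: before bootmem is up, hand out the
 * next page of the early page table buffer (pgt_buf_end, bounded by
 * pgt_buf_top) and access it through a temporary early_memremap()
 * mapping; once after_bootmem is set, fall back to get_zeroed_page() and
 * plain kernel virtual addresses.
 */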
static __ref void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = pgt_buf_end++;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
                *phys = __pa(adr);

                return adr;
        }

        if (pfn >= pgt_buf_top)
                panic("alloc_low_page: ran out of memory");

        adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
        clear_page(adr);
        *phys  = pfn * PAGE_SIZE;
        return adr;
}

static __ref void *map_low_page(void *virt)
{
        void *adr;
        unsigned long phys, left;

        if (after_bootmem)
                return virt;

        phys = __pa(virt);
        left = phys & (PAGE_SIZE - 1);
        adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
        adr = (void *)(((unsigned long)adr) | left);

        return adr;
}

static __ref void unmap_low_page(void *adr)
{
        if (after_bootmem)
                return;

        early_iounmap((void *)((unsigned long)adr & PAGE_MASK), PAGE_SIZE);
}

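/*
 * Create PTE-level (4K) mappings for the physical range addr..end within
 * one PTE page.  Entries that are already populated are left untouched.
 * Returns the physical address just past the last page mapped.
 */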
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
              pgprot_t prot)
{
        unsigned pages = 0;
        unsigned long last_map_addr = end;
        int i;

        pte_t *pte = pte_page + pte_index(addr);

        for(i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {

                if (addr >= end) {
                        if (!after_bootmem) {
                                for(; i < PTRS_PER_PTE; i++, pte++)
                                        set_pte(pte, __pte(0));
                        }
                        break;
                }

                /*
                 * We will reuse the existing mapping.
                 * Xen, for example, has some special requirements, like
                 * mapping page table pages as RO. So assume that whoever
                 * pre-set up these mappings knew what they were doing.
                 */
                if (pte_val(*pte)) {
                        pages++;
                        continue;
                }

                if (0)
                        printk("   pte=%p addr=%lx pte=%016lx\n",
                               pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
                pages++;
                set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
                last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
        }

        update_page_count(PG_LEVEL_4K, pages);

        return last_map_addr;
}

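/*
 * Create PMD-level mappings for the physical range address..end, using
 * 2MB pages where page_size_mask allows it and falling back to 4K PTE
 * pages otherwise.  Returns the physical address just past the last
 * range mapped.
 */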
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
              unsigned long page_size_mask, pgprot_t prot)
{
        unsigned long pages = 0;
        unsigned long last_map_addr = end;

        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                unsigned long pte_phys;
                pmd_t *pmd = pmd_page + pmd_index(address);
                pte_t *pte;
                pgprot_t new_prot = prot;

                if (address >= end) {
                        if (!after_bootmem) {
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        }
                        break;
                }

                if (pmd_val(*pmd)) {
                        if (!pmd_large(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                pte = map_low_page((pte_t *)pmd_page_vaddr(*pmd));
                                last_map_addr = phys_pte_init(pte, address,
                                                                end, prot);
                                unmap_low_page(pte);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
                        /*
                         * If we are ok with PG_LEVEL_2M mapping, then we will
                         * use the existing mapping.
                         *
                         * Otherwise, we will split the large page mapping but
                         * use the same existing protection bits, except for
                         * the large-page bit, so that we don't violate Intel's
                         * TLB Application note (317080), which says that while
                         * changing the page sizes, new and old translations
                         * should not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
                                pages++;
                                continue;
                        }
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }

                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pmd,
                                pfn_pte(address >> PAGE_SHIFT,
                                        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (address & PMD_MASK) + PMD_SIZE;
                        continue;
                }

                pte = alloc_low_page(&pte_phys);
                last_map_addr = phys_pte_init(pte, address, end, new_prot);
                unmap_low_page(pte);

                spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
        return last_map_addr;
}

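/*
 * Create PUD-level mappings for the physical range addr..end, using 1GB
 * pages where page_size_mask allows it and descending to phys_pmd_init()
 * otherwise.  Regions with no E820 coverage are cleared during early
 * boot.  Returns the physical address just past the last range mapped.
 */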
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                         unsigned long page_size_mask)
{
        unsigned long pages = 0;
        unsigned long last_map_addr = end;
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;
                pgprot_t prot = PAGE_KERNEL;

                if (addr >= end)
                        break;

                if (!after_bootmem &&
                                !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        if (!pud_large(*pud)) {
                                pmd = map_low_page(pmd_offset(pud, 0));
                                last_map_addr = phys_pmd_init(pmd, addr, end,
                                                         page_size_mask, prot);
                                unmap_low_page(pmd);
                                __flush_tlb_all();
                                continue;
                        }
                        /*
                         * If we are ok with PG_LEVEL_1G mapping, then we will
                         * use the existing mapping.
                         *
                         * Otherwise, we will split the gbpage mapping but use
                         * the same existing protection bits, except for the
                         * large-page bit, so that we don't violate Intel's TLB
                         * Application note (317080), which says that while
                         * changing the page sizes, new and old translations
                         * should not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
                                pages++;
                                continue;
                        }
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }

                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pud,
                                pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
                        continue;
                }

                pmd = alloc_low_page(&pmd_phys);
                last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
                                              prot);
                unmap_low_page(pmd);

                spin_lock(&init_mm.page_table_lock);
                pud_populate(&init_mm, pud, __va(pmd_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();

        update_page_count(PG_LEVEL_1G, pages);

        return last_map_addr;
}

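/*
 * Set up the kernel direct mapping for the physical range start..end,
 * one PGD entry at a time, and propagate any newly created PGD entries
 * to all page tables on pgd_list via sync_global_pgds().
 */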
unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{
        bool pgd_changed = false;
        unsigned long next, last_map_addr = end;
        unsigned long addr;

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);
        addr = start;

        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
                unsigned long pud_phys;
                pud_t *pud;

                next = (start + PGDIR_SIZE) & PGDIR_MASK;
                if (next > end)
                        next = end;

                if (pgd_val(*pgd)) {
                        pud = map_low_page((pud_t *)pgd_page_vaddr(*pgd));
                        last_map_addr = phys_pud_init(pud, __pa(start),
                                                 __pa(end), page_size_mask);
                        unmap_low_page(pud);
                        continue;
                }

                pud = alloc_low_page(&pud_phys);
                last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
                                                 page_size_mask);
                unmap_low_page(pud);

                spin_lock(&init_mm.page_table_lock);
                pgd_populate(&init_mm, pgd, __va(pud_phys));
                spin_unlock(&init_mm.page_table_lock);
                pgd_changed = true;
        }

        if (pgd_changed)
                sync_global_pgds(addr, end);

        __flush_tlb_all();

        return last_map_addr;
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
        memblock_x86_register_active_regions(0, 0, max_pfn);
}
#endif

void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = max_pfn;

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        /*
         * Clear the default setting for node 0.
         * Note: don't use nodes_clear() here; that really clears it when
         *       NUMA support is not compiled in, and a later node_set_state()
         *       will not set it back.
         */
        node_clear_state(0, N_NORMAL_MEMORY);

        free_area_init_nodes(max_zone_pfns);
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
        unsigned long end_pfn = PFN_UP(start + size);

        if (end_pfn > max_pfn) {
                max_pfn = end_pfn;
                max_low_pfn = end_pfn;
                high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
        }
}

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        last_mapped_pfn = init_memory_mapping(start, start + size);
        if (last_mapped_pfn > max_pfn_mapped)
                max_pfn_mapped = last_mapped_pfn;

        ret = __add_pages(nid, zone, start_pfn, nr_pages);
        WARN_ON_ONCE(ret);

        /* update max_pfn, max_low_pfn and high_memory */
        update_end_of_memory_vars(start, size);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

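/*
 * Late boot memory setup: release all bootmem pages to the page
 * allocator, account for absent and reserved pages, register the
 * vsyscall area with /proc/kcore and print the memory summary.
 */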
void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;
        unsigned long absent_pages;

        pci_iommu_alloc();

        /* clear_bss() already cleared the empty_zero_page */

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif

        absent_pages = absent_pages_in_range(0, max_pfn);
        reservedpages = max_pfn - totalram_pages - absent_pages;
        after_bootmem = 1;

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                         VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
                         "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                max_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                absent_pages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly;

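/*
 * Toggle the kernel identity mapping of the text range between
 * read-write and read-only.  Both helpers only act once
 * mark_rodata_ro() has set kernel_set_to_readonly.
 */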
void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = PFN_ALIGN(__stop___ex_table);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, end);

        /*
         * Make the kernel identity mapping for text RW. Kernel text
         * mapping will always be RO. Refer to the comment in
         * static_protections() in pageattr.c
         */
        set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = PFN_ALIGN(__stop___ex_table);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, end);

        /*
         * Set the kernel identity mapping for text RO.
         */
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long rodata_start =
                ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
        unsigned long end = (unsigned long) &__end_rodata_hpage_align;
        unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
        unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);
        unsigned long data_start = (unsigned long) &_sdata;

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        kernel_set_to_readonly = 1;

        /*
         * The rodata section (but not the kernel text!) should also be
         * not-executable.
         */
        set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);

        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

        free_init_pages("unused kernel memory",
                        (unsigned long) page_address(virt_to_page(text_end)),
                        (unsigned long)
                                 page_address(virt_to_page(rodata_start)));
        free_init_pages("unused kernel memory",
                        (unsigned long) page_address(virt_to_page(rodata_end)),
                        (unsigned long) page_address(virt_to_page(data_start)));
}

#endif

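/*
 * Check whether a kernel virtual address is backed by a valid page:
 * reject non-canonical addresses, walk the kernel page tables and, for a
 * present mapping, verify the target pfn with pfn_valid().
 */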
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access to the vsyscall page. This only
 * covers the 64-bit vsyscall page now. 32-bit has a real VMA and does
 * not need special handling anymore.
 */
static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_START,
        .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_IA32_EMULATION
        if (!mm || mm->context.ia32_compat)
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(mm);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        if (vma == &gate_vma)
                return "[vsyscall]";
        return NULL;
}

#ifdef CONFIG_X86_UV
#define MIN_MEMORY_BLOCK_SIZE   (1 << SECTION_SIZE_BITS)

unsigned long memory_block_size_bytes(void)
{
        if (is_uv_system()) {
                printk(KERN_INFO "UV: memory block size 2GB\n");
                return 2UL * 1024 * 1024 * 1024;
        }
        return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

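/*
 * Populate the vmemmap for the struct page range starting at start_page,
 * using 2MB PMD mappings when the CPU supports PSE and 4K pages
 * otherwise, then propagate the new entries with sync_global_pgds().
 */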
int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + size);
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        for (; addr < end; addr = next) {
                void *p = NULL;

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                if (!cpu_has_pse) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        pmd = vmemmap_pmd_populate(pud, addr, node);

                        if (!pmd)
                                return -ENOMEM;

                        p = vmemmap_pte_populate(pmd, addr, node);

                        if (!p)
                                return -ENOMEM;

                        addr_end = addr + PAGE_SIZE;
                        p_end = p + PAGE_SIZE;
                } else {
                        next = pmd_addr_end(addr, end);

                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd)) {
                                pte_t entry;

                                p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                                if (!p)
                                        return -ENOMEM;

                                entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                                PAGE_KERNEL_LARGE);
                                set_pmd(pmd, __pmd(pte_val(entry)));

                                /* check to see if we have contiguous blocks */
                                if (p_end != p || node_start != node) {
                                        if (p_start)
                                                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                                       addr_start, addr_end-1, p_start, p_end-1, node_start);
                                        addr_start = addr;
                                        node_start = node;
                                        p_start = p;
                                }

                                addr_end = addr + PMD_SIZE;
                                p_end = p + PMD_SIZE;
                        } else
                                vmemmap_verify((pte_t *)pmd, node, addr, next);
                }

        }
        sync_global_pgds((unsigned long)start_page, end);
        return 0;
}

void __meminit vmemmap_populate_print_last(void)
{
        if (p_start) {
                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                        addr_start, addr_end-1, p_start, p_end-1, node_start);
                p_start = NULL;
                p_end = NULL;
                node_start = 0;
        }
}
#endif