linux/arch/x86/mm/init_64.c
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>

static unsigned long dma_reserve __initdata;

static int __init parse_direct_gbpages_off(char *arg)
{
        direct_gbpages = 0;
        return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
        direct_gbpages = 1;
        return 0;
}
early_param("gbpages", parse_direct_gbpages_on);
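
/*
 * Boot-time control of 1GB pages in the direct mapping: passing
 * "gbpages" or "nogbpages" on the kernel command line forces
 * direct_gbpages on or off, overriding the default that is otherwise
 * derived from CPU support and CONFIG_DIRECT_GBPAGES.
 */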

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical memory, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack as well, use noexec=off.
 *
 * on   PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off  PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
        if (!strcmp(str, "on"))
                force_personality32 &= ~READ_IMPLIES_EXEC;
        else if (!strcmp(str, "off"))
                force_personality32 |= READ_IMPLIES_EXEC;
        return 1;
}
__setup("noexec32=", nonx32_setup);
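
/*
 * Example: booting with "noexec32=off" sets READ_IMPLIES_EXEC for 32-bit
 * tasks, so their PROT_READ mappings become executable again;
 * "noexec32=on" (the default) leaves PROT_READ without PROT_EXEC.
 */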

/*
 * NOTE: This function is marked __ref because it calls the __init
 * function alloc_bootmem_pages().  Doing so is safe ONLY while
 * after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}

static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
        if (pgd_none(*pgd)) {
                pud_t *pud = (pud_t *)spp_getpage();
                pgd_populate(&init_mm, pgd, pud);
                if (pud != pud_offset(pgd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
                               pud, pud_offset(pgd, 0));
        }
        return pud_offset(pgd, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
        if (pud_none(*pud)) {
                pmd_t *pmd = (pmd_t *) spp_getpage();
                pud_populate(&init_mm, pud, pmd);
                if (pmd != pmd_offset(pud, 0))
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                               pmd, pmd_offset(pud, 0));
        }
        return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) spp_getpage();
                pmd_populate_kernel(&init_mm, pmd, pte);
                if (pte != pte_offset_kernel(pmd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
        }
        return pte_offset_kernel(pmd, vaddr);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pud = pud_page + pud_index(vaddr);
        pmd = fill_pmd(pud, vaddr);
        pte = fill_pte(pmd, vaddr);

        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd;
        pud_t *pud_page;

        pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud_page = (pud_t*)pgd_page_vaddr(*pgd);
        set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        pgd_t *pgd;
        pud_t *pud;

        pgd = pgd_offset_k(vaddr);
        pud = fill_pud(pgd, vaddr);
        return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return fill_pte(pmd, vaddr);
}
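
/*
 * The fill_*() and populate_extra_*() helpers above walk the kernel
 * page tables for a virtual address and allocate any missing
 * intermediate levels via spp_getpage().  For example,
 * populate_extra_pte(vaddr) guarantees that a pte slot covering vaddr
 * exists and returns a pointer to it; installing the actual mapping
 * with set_pte() is still up to the caller.
 */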

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
                                                pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
                pgd = pgd_offset_k((unsigned long)__va(phys));
                if (pgd_none(*pgd)) {
                        pud = (pud_t *) spp_getpage();
                        set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pud = pud_offset(pgd, (unsigned long)__va(phys));
                if (pud_none(*pud)) {
                        pmd = (pmd_t *) spp_getpage();
                        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pmd = pmd_offset(pud, phys);
                BUG_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
        }
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}
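
/*
 * Illustrative use of the helpers above (the address below is made up):
 * to map 2MB of device registers uncached at their __va() address, a
 * caller could do
 *
 *	init_extra_mapping_uc(0xfd000000UL, PMD_SIZE);
 *
 * Both phys and size must be 2MB (PMD) aligned, as enforced by the
 * BUG_ON() in __init_extra_mapping().
 */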

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _end.  _end is
 * rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
        unsigned long vaddr = __START_KERNEL_map;
        unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;
        pmd_t *last_pmd = pmd + PTRS_PER_PMD;

        for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }
}
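
/*
 * Concretely: level2_kernel_pgt is a full page of PMDs, so the loop
 * above walks 512 entries covering 1GB of the __START_KERNEL_map area.
 * Only the 2MB entries spanning [_text, _end) survive; everything else,
 * including the invalid pre-_text entries produced by the phys_addr
 * fixup in head.S, is cleared.
 */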

static __ref void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = e820_table_end++;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
                *phys = __pa(adr);

                return adr;
        }

        if (pfn >= e820_table_top)
                panic("alloc_low_page: ran out of memory");

        adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys  = pfn * PAGE_SIZE;
        return adr;
}

static __ref void unmap_low_page(void *adr)
{
        if (after_bootmem)
                return;

        early_iounmap(adr, PAGE_SIZE);
}
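
/*
 * alloc_low_page()/unmap_low_page() provide the pages that the direct
 * mapping's page tables themselves are built from.  Before bootmem is
 * up, pages come from the e820_table_end..e820_table_top window that
 * the early table-space setup reserved for this purpose, and they are
 * only mapped temporarily via early_memremap(); once after_bootmem is
 * set, ordinary zeroed GFP pages are used and unmap_low_page() becomes
 * a no-op.
 */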

static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
              pgprot_t prot)
{
        unsigned pages = 0;
        unsigned long last_map_addr = end;
        int i;

        pte_t *pte = pte_page + pte_index(addr);

        for(i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {

                if (addr >= end) {
                        if (!after_bootmem) {
                                for(; i < PTRS_PER_PTE; i++, pte++)
                                        set_pte(pte, __pte(0));
                        }
                        break;
                }

                /*
                 * We will re-use the existing mapping.
                 * Xen, for example, has some special requirements, like
                 * mapping pagetable pages as RO.  So assume that whoever
                 * pre-set up these mappings knew what they were doing.
                 */
                if (pte_val(*pte)) {
                        pages++;
                        continue;
                }

                if (0)
                        printk("   pte=%p addr=%lx pte=%016lx\n",
                               pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
                pages++;
                set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
                last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
        }

        update_page_count(PG_LEVEL_4K, pages);

        return last_map_addr;
}
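
/*
 * phys_pte_init() above and the pmd/pud-level helpers below all follow
 * the same pattern: fill one page-table page for the physical range
 * [addr, end), reusing any entries that were pre-populated (e.g. by
 * Xen), and return last_map_addr, the physical address just past the
 * last page actually mapped.
 */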

static unsigned long __meminit
phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end,
                pgprot_t prot)
{
        pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);

        return phys_pte_init(pte, address, end, prot);
}

static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
              unsigned long page_size_mask, pgprot_t prot)
{
        unsigned long pages = 0;
        unsigned long last_map_addr = end;

        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                unsigned long pte_phys;
                pmd_t *pmd = pmd_page + pmd_index(address);
                pte_t *pte;
                pgprot_t new_prot = prot;

                if (address >= end) {
                        if (!after_bootmem) {
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        }
                        break;
                }

                if (pmd_val(*pmd)) {
                        if (!pmd_large(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                last_map_addr = phys_pte_update(pmd, address,
                                                                end, prot);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_2M mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the large page mapping but
                         * use the same existing protection bits, except for
                         * the large-page bit, so that we don't violate Intel's
                         * TLB Application Note (317080), which says that while
                         * changing the page sizes, new and old translations
                         * should not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
                                pages++;
                                continue;
                        }
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }

                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pmd,
                                pfn_pte(address >> PAGE_SHIFT,
                                        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (address & PMD_MASK) + PMD_SIZE;
                        continue;
                }

                pte = alloc_low_page(&pte_phys);
                last_map_addr = phys_pte_init(pte, address, end, new_prot);
                unmap_low_page(pte);

                spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
        return last_map_addr;
}

static unsigned long __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
                unsigned long page_size_mask, pgprot_t prot)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long last_map_addr;

        last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask, prot);
        __flush_tlb_all();
        return last_map_addr;
}

static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                         unsigned long page_size_mask)
{
        unsigned long pages = 0;
        unsigned long last_map_addr = end;
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;
                pgprot_t prot = PAGE_KERNEL;

                if (addr >= end)
                        break;

                if (!after_bootmem &&
                                !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        if (!pud_large(*pud)) {
                                last_map_addr = phys_pmd_update(pud, addr, end,
                                                         page_size_mask, prot);
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_1G mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the gbpage mapping but use
                         * the same existing protection bits, except for the
                         * large-page bit, so that we don't violate Intel's
                         * TLB Application Note (317080), which says that while
                         * changing the page sizes, new and old translations
                         * should not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
                                pages++;
                                continue;
                        }
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }

                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pud,
                                pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
                        continue;
                }

                pmd = alloc_low_page(&pmd_phys);
                last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
                                              prot);
                unmap_low_page(pmd);

                spin_lock(&init_mm.page_table_lock);
                pud_populate(&init_mm, pud, __va(pmd_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();

        update_page_count(PG_LEVEL_1G, pages);

        return last_map_addr;
}

static unsigned long __meminit
phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
                 unsigned long page_size_mask)
{
        pud_t *pud;

        pud = (pud_t *)pgd_page_vaddr(*pgd);

        return phys_pud_init(pud, addr, end, page_size_mask);
}

unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{

        unsigned long next, last_map_addr = end;

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
                unsigned long pud_phys;
                pud_t *pud;

                next = (start + PGDIR_SIZE) & PGDIR_MASK;
                if (next > end)
                        next = end;

                if (pgd_val(*pgd)) {
                        last_map_addr = phys_pud_update(pgd, __pa(start),
                                                 __pa(end), page_size_mask);
                        continue;
                }

                pud = alloc_low_page(&pud_phys);
                last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
                                                 page_size_mask);
                unmap_low_page(pud);

                spin_lock(&init_mm.page_table_lock);
                pgd_populate(&init_mm, pgd, __va(pud_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();

        return last_map_addr;
}
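
/*
 * kernel_physical_mapping_init() is driven by init_memory_mapping() in
 * arch/x86/mm/init.c: it maps the physical range [start, end) into the
 * kernel's direct mapping at the corresponding __va() addresses, using
 * 2MB and/or 1GB pages where page_size_mask allows, and returns the
 * physical address up to which the mapping was actually established.
 */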

#ifndef CONFIG_NUMA
void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
                                         0, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
        early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
        reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}
#endif

void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = max_pfn;

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        /*
         * Clear the default setting for node 0.
         * Note: don't use nodes_clear here; that really does the clearing
         *       when NUMA support is not compiled in, and a later
         *       node_set_state would not set it back.
         */
        node_clear_state(0, N_NORMAL_MEMORY);

        free_area_init_nodes(max_zone_pfns);
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone.  This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        last_mapped_pfn = init_memory_mapping(start, start + size);
        if (last_mapped_pfn > max_pfn_mapped)
                max_pfn_mapped = last_mapped_pfn;

        ret = __add_pages(nid, zone, start_pfn, nr_pages);
        WARN_ON_ONCE(ret);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);
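
/*
 * Note that arch_add_memory() only extends the direct mapping and
 * creates the sections/struct pages for the new range; actually making
 * the pages available to the page allocator (onlining) is a separate
 * step driven later by the memory hotplug core.
 */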

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;
        unsigned long absent_pages;

        pci_iommu_alloc();

        /* clear_bss() has already cleared empty_zero_page */

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif

        absent_pages = absent_pages_in_range(0, max_pfn);
        reservedpages = max_pfn - totalram_pages - absent_pages;
        after_bootmem = 1;

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                         VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
                         "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                max_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                absent_pages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

static int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_stext);
        unsigned long end = PFN_ALIGN(__start_rodata);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, end);

        set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_stext);
        unsigned long end = PFN_ALIGN(__start_rodata);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, end);

        set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
        unsigned long rodata_start =
                ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        kernel_set_to_readonly = 1;

        /*
         * The rodata section (but not the kernel text!) should also be
         * not-executable.
         */
        set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);

        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}

#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
#ifdef CONFIG_NUMA
        int nid, next_nid;
        int ret;
#endif
        unsigned long pfn = phys >> PAGE_SHIFT;

        if (pfn >= max_pfn) {
                /*
                 * This can happen with kdump kernels when accessing
                 * firmware tables:
                 */
                if (pfn < max_pfn_mapped)
                        return -EFAULT;

                printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n",
                                phys, len);
                return -EFAULT;
        }

        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        nid = phys_to_nid(phys);
        next_nid = phys_to_nid(phys + len - 1);
        if (nid == next_nid)
                ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags);
        else
                ret = reserve_bootmem(phys, len, flags);

        if (ret != 0)
                return ret;

#else
        reserve_bootmem(phys, len, flags);
#endif

        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
                dma_reserve += len / PAGE_SIZE;
                set_dma_reserve(dma_reserve);
        }

        return 0;
}

int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}
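
/*
 * kern_addr_valid() is used (e.g. by the /proc/kcore read path) to
 * check that a kernel virtual address is backed by a present mapping
 * before it is dereferenced.  The first test rejects non-canonical
 * addresses: the bits above __VIRTUAL_MASK_SHIFT must be a plain sign
 * extension, i.e. all zeroes or all ones.
 */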

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_START,
        .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        if (vma == &gate_vma)
                return "[vsyscall]";
        return NULL;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + size);
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        for (; addr < end; addr = next) {
                void *p = NULL;

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                if (!cpu_has_pse) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        pmd = vmemmap_pmd_populate(pud, addr, node);

                        if (!pmd)
                                return -ENOMEM;

                        p = vmemmap_pte_populate(pmd, addr, node);

                        if (!p)
                                return -ENOMEM;

                        addr_end = addr + PAGE_SIZE;
                        p_end = p + PAGE_SIZE;
                } else {
                        next = pmd_addr_end(addr, end);

                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd)) {
                                pte_t entry;

                                p = vmemmap_alloc_block(PMD_SIZE, node);
                                if (!p)
                                        return -ENOMEM;

                                entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                                PAGE_KERNEL_LARGE);
                                set_pmd(pmd, __pmd(pte_val(entry)));

                                /* check to see if we have contiguous blocks */
                                if (p_end != p || node_start != node) {
                                        if (p_start)
                                                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                                       addr_start, addr_end-1, p_start, p_end-1, node_start);
                                        addr_start = addr;
                                        node_start = node;
                                        p_start = p;
                                }

                                addr_end = addr + PMD_SIZE;
                                p_end = p + PMD_SIZE;
                        } else
                                vmemmap_verify((pte_t *)pmd, node, addr, next);
                }

        }
        return 0;
}
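
/*
 * With PSE available, the struct page array is backed by 2MB pages
 * allocated node-locally via vmemmap_alloc_block(); without PSE the
 * code above falls back to populating ordinary 4k ptes one page at a
 * time.  The addr_start/p_start bookkeeping only exists to coalesce
 * the "PMD ->" debug lines printed here and by
 * vmemmap_populate_print_last() below.
 */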

void __meminit vmemmap_populate_print_last(void)
{
        if (p_start) {
                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                        addr_start, addr_end-1, p_start, p_end-1, node_start);
                p_start = NULL;
                p_end = NULL;
                node_start = 0;
        }
}
#endif