linux/arch/powerpc/mm/mem.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

int init_bootmem_done;
int mem_init_done;
phys_addr_t memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

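/*
 * Look up the kernel PTE for an already-mapped kernel virtual address
 * by walking pgd -> pud -> pmd -> pte.  The caller must ensure the
 * mapping exists; no page table levels are allocated here.
 */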
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}
#endif

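/*
 * Return 1 if the given pfn is backed by RAM.  On 64-bit this is
 * decided by searching the lmb memory regions; 32-bit simply treats
 * everything below max_pfn as RAM.
 */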
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64    /* XXX for now */
        return pfn < max_pfn;
#else
        unsigned long paddr = (pfn << PAGE_SHIFT);
        int i;
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long base;

                base = lmb.memory.region[i].base;

                if ((paddr >= base) &&
                        (paddr < (base + lmb.memory.region[i].size))) {
                        return 1;
                }
        }

        return 0;
#endif
}

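/*
 * Pick the page protection for a physical-address mapping (used for
 * things like mmap of /dev/mem).  Platforms may override this via
 * ppc_md; otherwise anything that is not RAM is mapped non-cached.
 */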
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = pgprot_noncached(vma_prot);

        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

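/*
 * Hot-add a block of memory to node @nid: extend the kernel linear
 * mapping to cover it, then hand the new page range to the first
 * zone of the node (which is sufficient on non-highmem platforms).
 */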
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        pgdata = NODE_DATA(nid);

        start = (unsigned long)__va(start);
        create_section_mapping(start, start + size);

        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range.  PPC64 does not maintain the memory layout in
 * /proc/iomem.  Instead it maintains it in lmb.memory structures.
 * Walk through the memory regions, find holes, and call back for each
 * contiguous region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                void *arg, int (*func)(unsigned long, unsigned long, void *))
{
        struct lmb_property res;
        unsigned long pfn, len;
        u64 end;
        int ret = -1;

        res.base = (u64) start_pfn << PAGE_SHIFT;
        res.size = (u64) nr_pages << PAGE_SHIFT;

        end = res.base + res.size - 1;
        while ((res.base < end) && (lmb_find(&res) >= 0)) {
                pfn = (unsigned long)(res.base >> PAGE_SHIFT);
                len = (unsigned long)(res.size >> PAGE_SHIFT);
                ret = (*func)(pfn, len, arg);
                if (ret)
                        break;
                res.base += (res.size + 1);
                res.size = (end - res.base + 1);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
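/*
 * A minimal sketch of a caller (hypothetical, for illustration only):
 * count how many pages in [0, max_pfn) are backed by RAM.  The
 * callback receives a starting pfn and a length in pages for each
 * contiguous RAM region.
 *
 *      static int count_ram_pages(unsigned long pfn, unsigned long nr,
 *                                 void *arg)
 *      {
 *              *(unsigned long *)arg += nr;
 *              return 0;       (returning non-zero stops the walk)
 *      }
 *
 *      unsigned long nr_ram = 0;
 *      walk_system_ram_range(0, max_pfn, &nr_ram, count_ram_pages);
 */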

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
        unsigned long i;
        unsigned long start, bootmap_pages;
        unsigned long total_pages;
        int boot_mapsize;

        max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
        total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /*
         * Find an area to use for the bootmem bitmap.  Calculate the size of
         * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
         * Add 1 additional page in case the address isn't page-aligned.
         */
        bootmap_pages = bootmem_bootmap_pages(total_pages);

        start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
        boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT,
                                         min_low_pfn, max_low_pfn);

        /* Add active regions with valid PFNs */
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;
                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }

        /* Add all physical memory to the bootmem map, mark each area
         * present.
         */
#ifdef CONFIG_HIGHMEM
        free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long addr = lmb.reserved.region[i].base +
                                     lmb_size_bytes(&lmb.reserved, i) - 1;
                if (addr < lowmem_end_addr)
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        lmb_size_bytes(&lmb.reserved, i),
                                        BOOTMEM_DEFAULT);
                else if (lmb.reserved.region[i].base < lowmem_end_addr) {
                        unsigned long adjusted_size = lowmem_end_addr -
                                      lmb.reserved.region[i].base;
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        adjusted_size, BOOTMEM_DEFAULT);
                }
        }
#else
        free_bootmem_with_active_regions(0, max_pfn);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++)
                reserve_bootmem(lmb.reserved.region[i].base,
                                lmb_size_bytes(&lmb.reserved, i),
                                BOOTMEM_DEFAULT);

#endif
        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);

        init_bootmem_done = 1;
}

/*
 * Mark page ranges that are not backed by RAM (the holes between lmb
 * memory regions) as nosave, so that hibernation does not try to save
 * or restore their contents.
 */
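/*
 * For example, with a hypothetical layout of RAM at [0, 256MB) and
 * [512MB, 1GB), the pfns covering [256MB, 512MB) would be registered
 * as a single nosave region.
 */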
static int __init mark_nonram_nosave(void)
{
        unsigned long lmb_next_region_start_pfn,
                      lmb_region_max_pfn;
        int i;

        for (i = 0; i < lmb.memory.cnt - 1; i++) {
                lmb_region_max_pfn =
                        (lmb.memory.region[i].base >> PAGE_SHIFT) +
                        (lmb.memory.region[i].size >> PAGE_SHIFT);
                lmb_next_region_start_pfn =
                        lmb.memory.region[i+1].base >> PAGE_SHIFT;

                if (lmb_region_max_pfn < lmb_next_region_start_pfn)
                        register_nosave_region(lmb_region_max_pfn,
                                               lmb_next_region_start_pfn);
        }

        return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long total_ram = lmb_phys_mem_size();
        phys_addr_t top_of_ram = lmb_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
        unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
        unsigned long end = __fix_to_virt(FIX_HOLE);

        for (; v < end; v += PAGE_SIZE)
                map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
        map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
        pkmap_page_table = virt_to_kpte(PKMAP_BASE);

        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n",
               (unsigned long long)top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (long int)((top_of_ram - total_ram) >> 20));
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
        max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
        max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
#endif
        pg_data_t *pgdat;
        unsigned long i;
        struct page *page;
        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

        num_physpages = lmb.memory.size >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages != 0) {
                        printk(KERN_DEBUG "freeing bootmem node %d\n", nid);
                        totalram_pages +=
                                free_all_bootmem_node(NODE_DATA(nid));
                }
        }
#else
        max_mapnr = max_pfn;
        totalram_pages += free_all_bootmem();
#endif
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        if (PageReserved(page))
                                reservedpages++;
                }
        }

        codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
        datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

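        /*
         * Hand every highmem page that lmb has not reserved to the
         * page allocator, and account it in totalhigh_pages.
         */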
#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        struct page *page = pfn_to_page(pfn);
                        if (lmb_is_reserved(pfn << PAGE_SHIFT))
                                continue;
                        ClearPageReserved(page);
                        init_page_count(page);
                        __free_page(page);
                        totalhigh_pages++;
                        reservedpages--;
                }
                totalram_pages += totalhigh_pages;
                printk(KERN_DEBUG "High memory: %luk\n",
                       totalhigh_pages << (PAGE_SHIFT-10));
        }
#endif /* CONFIG_HIGHMEM */

        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
               "%luk reserved, %luk data, %luk bss, %luk init)\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                bsssize >> 10,
                initsize >> 10);

#ifdef CONFIG_PPC32
        pr_info("Kernel virtual memory layout:\n");
        pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
        pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
                PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
        pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
                IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
        pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
                ioremap_bot, IOREMAP_TOP);
        pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
                VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */

        mem_init_done = 1;
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

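/*
 * Flush the page out of the data cache and invalidate the matching
 * icache lines, using whichever method the CPU family needs: a
 * temporary kmap on Book E (which may have highmem), the linear
 * mapping on 8xx and 64-bit, or a physical-address flush elsewhere.
 */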
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
        void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
        __flush_dcache_icache(start);
        kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* No need to kmap on 8xx or 64-bit: highmem is not supported
         * there, so page_address() always works. */
        __flush_dcache_icache(page_address(page));
#else
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
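
/*
 * Zero a page for user space.  The extra flush_dcache_page() marks
 * the page as not icache-clean; as the comment below notes, some
 * versions of glibc assume zero-filled pages are already clean.
 */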
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address. Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

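/*
 * Make a just-written range of a user page coherent for instruction
 * fetch (used, for example, when ptrace pokes code into another
 * process).  kmap() is used so this also works on highmem pages.
 */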
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
        unsigned long access = 0, trap;

        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(pte) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text. We have to test
         * for regs NULL since init will get here first thing at boot
         *
         * We also avoid filling the hash if not coming from a fault
         */
        if (current->thread.regs == NULL)
                return;
        trap = TRAP(current->thread.regs);
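        /* 0x400 is an instruction storage exception and 0x300 a data
         * storage exception; anything else did not come from a memory
         * fault, so don't preload a hash entry for it.
         */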
        if (trap == 0x400)
                access |= _PAGE_EXEC;
        else if (trap != 0x300)
                return;
        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}