linux/arch/arm/mm/mmu.c
/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>

#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED        0
#define CPOLICY_BUFFERED        1
#define CPOLICY_WRITETHROUGH    2
#define CPOLICY_WRITEBACK       3
#define CPOLICY_WRITEALLOC      4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
        const char      policy[16];
        unsigned int    cr_mask;
        unsigned int    pmd;
        unsigned int    pte;
};

static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
                .pte            = L_PTE_MT_UNCACHED,
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
                .pte            = L_PTE_MT_BUFFERABLE,
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
                .pte            = L_PTE_MT_WRITETHROUGH,
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
                .pte            = L_PTE_MT_WRITEBACK,
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
                .pte            = L_PTE_MT_WRITEALLOC,
        }
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
                int len = strlen(cache_policies[i].policy);

                if (memcmp(*p, cache_policies[i].policy, len) == 0) {
                        cachepolicy = i;
                        cr_alignment &= ~cache_policies[i].cr_mask;
                        cr_no_alignment &= ~cache_policies[i].cr_mask;
                        *p += len;
                        break;
                }
        }
        if (i == ARRAY_SIZE(cache_policies))
                printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
        /*
         * This restriction is partly to do with the way we boot; it is
         * unpredictable to have memory mapped using two different sets of
         * memory attributes (shared, type, and cache attribs).  We can not
         * change these attributes once the initial assembly has setup the
         * page tables.
         */
        if (cpu_architecture() >= CPU_ARCH_ARMv6) {
                printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
                cachepolicy = CPOLICY_WRITEBACK;
        }
        flush_cache_all();
        set_cr(cr_alignment);
}
__early_param("cachepolicy=", early_cachepolicy);
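
/*
 * Example (illustrative only): booting with "cachepolicy=writethrough"
 * on the kernel command line selects the write-through entry from
 * cache_policies[] above; the handler consumes the matching name,
 * clears the corresponding bits in cr_alignment/cr_no_alignment and
 * rewrites the control register.
 */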

static void __init early_nocache(char **__unused)
{
        char *p = "buffered";
        printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}
__early_param("nocache", early_nocache);

static void __init early_nowrite(char **__unused)
{
        char *p = "uncached";
        printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}
__early_param("nowb", early_nowrite);

static void __init early_ecc(char **p)
{
        if (memcmp(*p, "on", 2) == 0) {
                ecc_mask = PMD_PROTECTION;
                *p += 2;
        } else if (memcmp(*p, "off", 3) == 0) {
                ecc_mask = 0;
                *p += 3;
        }
}
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
        cr_alignment &= ~CR_A;
        cr_no_alignment &= ~CR_A;
        set_cr(cr_alignment);
        return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
        unsigned long flags;

        mask &= ~CR_A;

        set &= mask;

        local_irq_save(flags);

        cr_no_alignment = (cr_no_alignment & ~mask) | set;
        cr_alignment = (cr_alignment & ~mask) | set;

        set_cr((get_cr() & ~mask) | set);

        local_irq_restore(flags);
}
#endif
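
/*
 * Illustrative (hypothetical) caller: processor setup code could turn
 * on round-robin cache replacement with
 *
 *        adjust_cr(CR_RR, CR_RR);
 *
 * which updates both saved copies of the control register as well as
 * the live register, while never touching the alignment bit.
 */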

#define PROT_PTE_DEVICE         L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
#define PROT_SECT_DEVICE        PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
        [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
                                  L_PTE_SHARED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_S,
                .domain         = DOMAIN_IO,
        },
        [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE,
                .domain         = DOMAIN_IO,
        },
        [MT_DEVICE_CACHED] = {    /* ioremap_cached */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_WB,
                .domain         = DOMAIN_IO,
        },
        [MT_DEVICE_WC] = {      /* ioremap_wc */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE,
                .domain         = DOMAIN_IO,
        },
        [MT_UNCACHED] = {
                .prot_pte       = PROT_PTE_DEVICE,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain         = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_LOW_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_HIGH_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_USER | L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MEMORY_NONCACHED] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
};

const struct mem_type *get_mem_type(unsigned int type)
{
        return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
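
/*
 * A minimal sketch of how a caller (the ioremap code, for instance)
 * might use this lookup; illustrative only, not part of this file:
 *
 *        const struct mem_type *mt = get_mem_type(MT_DEVICE_NONSHARED);
 *
 *        if (mt)
 *                prot = __pgprot(mt->prot_pte);
 */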

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
        unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
        int cpu_arch = cpu_architecture();
        int i;

        if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
                if (cachepolicy > CPOLICY_BUFFERED)
                        cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
                if (cachepolicy > CPOLICY_WRITETHROUGH)
                        cachepolicy = CPOLICY_WRITETHROUGH;
#endif
        }
        if (cpu_arch < CPU_ARCH_ARMv5) {
                if (cachepolicy >= CPOLICY_WRITEALLOC)
                        cachepolicy = CPOLICY_WRITEBACK;
                ecc_mask = 0;
        }
#ifdef CONFIG_SMP
        cachepolicy = CPOLICY_WRITEALLOC;
#endif

        /*
         * Strip out features not present on earlier architectures.
         * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
         * without extended page tables don't have the 'Shared' bit.
         */
        if (cpu_arch < CPU_ARCH_ARMv5)
                for (i = 0; i < ARRAY_SIZE(mem_types); i++)
                        mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
        if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
                for (i = 0; i < ARRAY_SIZE(mem_types); i++)
                        mem_types[i].prot_sect &= ~PMD_SECT_S;

        /*
         * On ARMv5 and lower, bit 4 must be set for page tables (was: cache
         * "update-able on write" bit on ARM610).  However, Xscale and
         * Xscale3 require this bit to be cleared.
         */
        if (cpu_is_xscale() || cpu_is_xsc3()) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        mem_types[i].prot_sect &= ~PMD_BIT4;
                        mem_types[i].prot_l1 &= ~PMD_BIT4;
                }
        } else if (cpu_arch < CPU_ARCH_ARMv6) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
                        if (mem_types[i].prot_sect)
                                mem_types[i].prot_sect |= PMD_BIT4;
                }
        }

        /*
         * Mark the device areas according to the CPU/architecture.
         */
        if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
                if (!cpu_is_xsc3()) {
                        /*
                         * Mark device regions on ARMv6+ as execute-never
                         * to prevent speculative instruction fetches.
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
                }
                if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
                        /*
                         * For ARMv7 with TEX remapping,
                         * - shared device is SXCB=1100
                         * - nonshared device is SXCB=0100
                         * - write combine device mem is SXCB=0001
                         * (Uncached Normal memory)
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
                } else if (cpu_is_xsc3()) {
                        /*
                         * For Xscale3,
                         * - shared device is TEXCB=00101
                         * - nonshared device is TEXCB=01000
                         * - write combine device mem is TEXCB=00100
                         * (Inner/Outer Uncacheable in xsc3 parlance)
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
                } else {
                        /*
                         * For ARMv6 and ARMv7 without TEX remapping,
                         * - shared device is TEXCB=00001
                         * - nonshared device is TEXCB=01000
                         * - write combine device mem is TEXCB=00100
                         * (Uncached Normal in ARMv6 parlance).
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
                }
        } else {
                /*
                 * On others, write combining is "Uncached/Buffered"
                 */
                mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
        }

        /*
         * Now deal with the memory-type mappings
         */
        cp = &cache_policies[cachepolicy];
        vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

#ifndef CONFIG_SMP
        /*
         * Only use write-through for non-SMP systems
         */
        if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
                vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
#endif

        /*
         * Enable CPU-specific coherency if supported.
         * (Only available on XSC3 at the moment.)
         */
        if (arch_is_coherent() && cpu_is_xsc3())
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;

        /*
         * ARMv6 and above have extended page tables.
         */
        if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
                /*
                 * Mark cache clean areas and XIP ROM read only
                 * from SVC mode and no access from userspace.
                 */
                mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

#ifdef CONFIG_SMP
                /*
                 * Mark memory with the "shared" attribute for SMP systems
                 */
                user_pgprot |= L_PTE_SHARED;
                kern_pgprot |= L_PTE_SHARED;
                vecs_pgprot |= L_PTE_SHARED;
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
                mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
#endif
        }

        /*
         * Non-cacheable Normal - intended for memory areas that must
         * not cause dirty cache line writebacks when used
         */
        if (cpu_arch >= CPU_ARCH_ARMv6) {
                if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
                        /* Non-cacheable Normal is XCB = 001 */
                        mem_types[MT_MEMORY_NONCACHED].prot_sect |=
                                PMD_SECT_BUFFERED;
                } else {
                        /* For both ARMv6 and non-TEX-remapping ARMv7 */
                        mem_types[MT_MEMORY_NONCACHED].prot_sect |=
                                PMD_SECT_TEX(1);
                }
        } else {
                mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
        }

        for (i = 0; i < 16; i++) {
                unsigned long v = pgprot_val(protection_map[i]);
                protection_map[i] = __pgprot(v | user_pgprot);
        }

        mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
        mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

        pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
        pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
                                 L_PTE_DIRTY | L_PTE_WRITE |
                                 L_PTE_EXEC | kern_pgprot);

        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_ROM].prot_sect |= cp->pmd;

        switch (cp->pmd) {
        case PMD_SECT_WT:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
                break;
        case PMD_SECT_WB:
        case PMD_SECT_WBWA:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
                break;
        }
        printk("Memory policy: ECC %sabled, Data cache %s\n",
                ecc_mask ? "en" : "dis", cp->policy);

        for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                struct mem_type *t = &mem_types[i];
                if (t->prot_l1)
                        t->prot_l1 |= PMD_DOMAIN(t->domain);
                if (t->prot_sect)
                        t->prot_sect |= PMD_DOMAIN(t->domain);
        }
}

#define vectors_base()  (vectors_high() ? 0xffff0000 : 0)

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  const struct mem_type *type)
{
        pte_t *pte;

        if (pmd_none(*pmd)) {
                pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
                __pmd_populate(pmd, __pa(pte) | type->prot_l1);
        }

        pte = pte_offset_kernel(pmd, addr);
        do {
                set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
                                      unsigned long end, unsigned long phys,
                                      const struct mem_type *type)
{
        pmd_t *pmd = pmd_offset(pgd, addr);

        /*
         * Try a section mapping - end, addr and phys must all be aligned
         * to a section boundary.  Note that PMDs refer to the individual
         * L1 entries, whereas PGDs refer to a group of L1 entries making
         * up one logical pointer to an L2 table.
         */
        if (((addr | end | phys) & ~SECTION_MASK) == 0) {
                pmd_t *p = pmd;

                if (addr & SECTION_SIZE)
                        pmd++;

                do {
                        *pmd = __pmd(phys | type->prot_sect);
                        phys += SECTION_SIZE;
                } while (pmd++, addr += SECTION_SIZE, addr != end);

                flush_pmd_entry(p);
        } else {
                /*
                 * No need to loop; pte's aren't interested in the
                 * individual L1 entries.
                 */
                alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
        }
}

static void __init create_36bit_mapping(struct map_desc *md,
                                        const struct mem_type *type)
{
        unsigned long phys, addr, length, end;
        pgd_t *pgd;

        addr = md->virtual;
        phys = (unsigned long)__pfn_to_phys(md->pfn);
        length = PAGE_ALIGN(md->length);

        if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
                printk(KERN_ERR "MM: CPU does not support supersection "
                       "mapping for 0x%08llx at 0x%08lx\n",
                       __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        /* N.B. ARMv6 supersections are only defined to work with domain 0.
         *      Since domain assignments can in fact be arbitrary, the
         *      'domain == 0' check below is required to ensure that ARMv6
         *      supersections are only allocated for domain 0 regardless
         *      of the actual domain assignments in use.
         */
        if (type->domain) {
                printk(KERN_ERR "MM: invalid domain in supersection "
                       "mapping for 0x%08llx at 0x%08lx\n",
                       __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
                printk(KERN_ERR "MM: cannot create mapping for "
                       "0x%08llx at 0x%08lx invalid alignment\n",
                       __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        /*
         * Shift bits [35:32] of address into bits [23:20] of PMD
         * (See ARMv6 spec).
         */
        phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

        pgd = pgd_offset_k(addr);
        end = addr + length;
        do {
                pmd_t *pmd = pmd_offset(pgd, addr);
                int i;

                for (i = 0; i < 16; i++)
                        *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

                addr += SUPERSECTION_SIZE;
                phys += SUPERSECTION_SIZE;
                pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
        } while (addr != end);
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
        unsigned long phys, addr, length, end;
        const struct mem_type *type;
        pgd_t *pgd;

        if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
                printk(KERN_WARNING "BUG: not creating mapping for "
                       "0x%08llx at 0x%08lx in user region\n",
                       __pfn_to_phys((u64)md->pfn), md->virtual);
                return;
        }

        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
            md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
                printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
                       "overlaps vmalloc space\n",
                       __pfn_to_phys((u64)md->pfn), md->virtual);
        }

        type = &mem_types[md->type];

        /*
         * Catch 36-bit addresses
         */
        if (md->pfn >= 0x100000) {
                create_36bit_mapping(md, type);
                return;
        }

        addr = md->virtual & PAGE_MASK;
        phys = (unsigned long)__pfn_to_phys(md->pfn);
        length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

        if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
                printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
                       "be mapped using pages, ignoring.\n",
                       __pfn_to_phys(md->pfn), addr);
                return;
        }

        pgd = pgd_offset_k(addr);
        end = addr + length;
        do {
                unsigned long next = pgd_addr_end(addr, end);

                alloc_init_section(pgd, addr, next, phys, type);

                phys += next - addr;
                addr = next;
        } while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                create_mapping(io_desc + i);
}
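
/*
 * A machine port typically provides a static table of its fixed device
 * mappings and hands it to iotable_init() from its ->map_io() hook.
 * A minimal sketch (the names, addresses and sizes below are made up
 * for illustration):
 *
 *        static struct map_desc example_io_desc[] __initdata = {
 *                {
 *                        .virtual        = 0xf8000000,
 *                        .pfn            = __phys_to_pfn(0x10000000),
 *                        .length         = SZ_1M,
 *                        .type           = MT_DEVICE,
 *                },
 *        };
 *
 *        static void __init example_map_io(void)
 *        {
 *                iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
 *        }
 */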

static unsigned long __initdata vmalloc_reserve = SZ_128M;

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
static void __init early_vmalloc(char **arg)
{
        vmalloc_reserve = memparse(*arg, arg);

        if (vmalloc_reserve < SZ_16M) {
                vmalloc_reserve = SZ_16M;
                printk(KERN_WARNING
                        "vmalloc area too small, limiting to %luMB\n",
                        vmalloc_reserve >> 20);
        }

        if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
                vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
                printk(KERN_WARNING
                        "vmalloc area is too big, limiting to %luMB\n",
                        vmalloc_reserve >> 20);
        }
}
__early_param("vmalloc=", early_vmalloc);
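
/*
 * Example (illustrative only): booting with "vmalloc=256M" asks for a
 * 256MB vmalloc area; requests below 16MB, or large enough to leave
 * less than 32MB of lowmem, are clamped by the checks above.
 */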

#define VMALLOC_MIN     (void *)(VMALLOC_END - vmalloc_reserve)

static void __init sanity_check_meminfo(void)
{
        int i, j, highmem = 0;

        for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
                struct membank *bank = &meminfo.bank[j];
                *bank = meminfo.bank[i];

#ifdef CONFIG_HIGHMEM
                if (__va(bank->start) > VMALLOC_MIN ||
                    __va(bank->start) < (void *)PAGE_OFFSET)
                        highmem = 1;

                bank->highmem = highmem;

                /*
                 * Split those memory banks which are partially overlapping
                 * the vmalloc area, greatly simplifying things later.
                 */
                if (__va(bank->start) < VMALLOC_MIN &&
                    bank->size > VMALLOC_MIN - __va(bank->start)) {
                        if (meminfo.nr_banks >= NR_BANKS) {
                                printk(KERN_CRIT "NR_BANKS too low, "
                                                 "ignoring high memory\n");
                        } else {
                                memmove(bank + 1, bank,
                                        (meminfo.nr_banks - i) * sizeof(*bank));
                                meminfo.nr_banks++;
                                i++;
                                bank[1].size -= VMALLOC_MIN - __va(bank->start);
                                bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
                                bank[1].highmem = highmem = 1;
                                j++;
                        }
                        bank->size = VMALLOC_MIN - __va(bank->start);
                }
#else
                bank->highmem = highmem;

                /*
                 * Check whether this memory bank would entirely overlap
                 * the vmalloc area.
                 */
                if (__va(bank->start) >= VMALLOC_MIN ||
                    __va(bank->start) < (void *)PAGE_OFFSET) {
                        printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
                               "(vmalloc region overlap).\n",
                               bank->start, bank->start + bank->size - 1);
                        continue;
                }

                /*
                 * Check whether this memory bank would partially overlap
                 * the vmalloc area.
                 */
                if (__va(bank->start + bank->size) > VMALLOC_MIN ||
                    __va(bank->start + bank->size) < __va(bank->start)) {
                        unsigned long newsize = VMALLOC_MIN - __va(bank->start);
                        printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
                               "to -%.8lx (vmalloc region overlap).\n",
                               bank->start, bank->start + bank->size - 1,
                               bank->start + newsize - 1);
                        bank->size = newsize;
                }
#endif
                j++;
        }
#ifdef CONFIG_HIGHMEM
        if (highmem) {
                const char *reason = NULL;

                if (cache_is_vipt_aliasing()) {
                        /*
                         * Interactions between kmap and other mappings
                         * make highmem support with aliasing VIPT caches
                         * rather difficult.
                         */
                        reason = "with VIPT aliasing cache";
#ifdef CONFIG_SMP
                } else if (tlb_ops_need_broadcast()) {
                        /*
                         * kmap_high needs to occasionally flush TLB entries,
                         * however, if the TLB entries need to be broadcast
                         * we may deadlock:
                         *  kmap_high(irqs off)->flush_all_zero_pkmaps->
                         *  flush_tlb_kernel_range->smp_call_function_many
                         *   (must not be called with irqs off)
                         */
                        reason = "without hardware TLB ops broadcasting";
#endif
                }
                if (reason) {
                        printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
                                reason);
                        while (j > 0 && meminfo.bank[j - 1].highmem)
                                j--;
                }
        }
#endif
        meminfo.nr_banks = j;
}

static inline void prepare_page_table(void)
{
        unsigned long addr;

        /*
         * Clear out all the mappings below the kernel image.
         */
        for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
        /* The XIP kernel is mapped in the module area -- skip over it */
        addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
        for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

        /*
         * Clear out all the kernel space mappings, except for the first
         * memory bank, up to the end of the vmalloc region.
         */
        for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
             addr < VMALLOC_END; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the various regions of node 0
 */
void __init reserve_node_zero(pg_data_t *pgdat)
{
        unsigned long res_size = 0;

        /*
         * Register the kernel text and data with bootmem.
         * Note that this can only be in node 0.
         */
#ifdef CONFIG_XIP_KERNEL
        reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
                        BOOTMEM_DEFAULT);
#else
        reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
                        BOOTMEM_DEFAULT);
#endif

        /*
         * Reserve the page tables.  These are already in use,
         * and can only be in node 0.
         */
        reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
                             PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);

        /*
         * Hmm... This should go elsewhere, but we really really need to
         * stop things allocating the low memory; ideally we need a better
         * implementation of GFP_DMA which does not assume that DMA-able
         * memory starts at zero.
         */
        if (machine_is_integrator() || machine_is_cintegrator())
                res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

        /*
         * These should likewise go elsewhere.  They pre-reserve the
         * screen memory region at the start of main system memory.
         */
        if (machine_is_edb7211())
                res_size = 0x00020000;
        if (machine_is_p720t())
                res_size = 0x00014000;

        /* H1940 and RX3715 need to reserve this for suspend */

        if (machine_is_h1940() || machine_is_rx3715()) {
                reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
                                BOOTMEM_DEFAULT);
                reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
                                BOOTMEM_DEFAULT);
        }

        if (machine_is_palmld() || machine_is_palmtx()) {
                reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
                                BOOTMEM_EXCLUSIVE);
                reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
                                BOOTMEM_EXCLUSIVE);
        }

        if (machine_is_treo680()) {
                reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
                                BOOTMEM_EXCLUSIVE);
                reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
                                BOOTMEM_EXCLUSIVE);
        }

        if (machine_is_palmt5())
                reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
                                BOOTMEM_EXCLUSIVE);

        /*
         * U300 - This platform family can share physical memory
         * between two ARM cpus, one running Linux and the other
         * running another OS.
         */
        if (machine_is_u300()) {
#ifdef CONFIG_MACH_U300_SINGLE_RAM
#if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) &&    \
        CONFIG_MACH_U300_2MB_ALIGNMENT_FIX
                res_size = 0x00100000;
#endif
#endif
        }

#ifdef CONFIG_SA1111
        /*
         * Because of the SA1111 DMA bug, we want to preserve our
         * precious DMA-able memory...
         */
        res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
        if (res_size)
                reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
                                BOOTMEM_DEFAULT);
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function: you can't use any function or debugging method which
 * may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
        struct map_desc map;
        unsigned long addr;
        void *vectors;

        /*
         * Allocate the vector page early.
         */
        vectors = alloc_bootmem_low_pages(PAGE_SIZE);

        for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

        /*
         * Map the kernel if it is XIP.
         * It is always first in the module area.
         */
#ifdef CONFIG_XIP_KERNEL
        map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
        map.virtual = MODULES_VADDR;
        map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
        map.type = MT_ROM;
        create_mapping(&map);
#endif

        /*
         * Map the cache flushing regions.
         */
#ifdef FLUSH_BASE
        map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
        map.virtual = FLUSH_BASE;
        map.length = SZ_1M;
        map.type = MT_CACHECLEAN;
        create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
        map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
        map.virtual = FLUSH_BASE_MINICACHE;
        map.length = SZ_1M;
        map.type = MT_MINICLEAN;
        create_mapping(&map);
#endif

        /*
         * Create a mapping for the machine vectors at the high-vectors
         * location (0xffff0000).  If we aren't using high-vectors, also
         * create a mapping at the low-vectors virtual address.
         */
        map.pfn = __phys_to_pfn(virt_to_phys(vectors));
        map.virtual = 0xffff0000;
        map.length = PAGE_SIZE;
        map.type = MT_HIGH_VECTORS;
        create_mapping(&map);

        if (!vectors_high()) {
                map.virtual = 0;
                map.type = MT_LOW_VECTORS;
                create_mapping(&map);
        }

        /*
         * Ask the machine support to map in the statically mapped devices.
         */
        if (mdesc->map_io)
                mdesc->map_io();

        /*
         * Finally flush the caches and tlb to ensure that we're in a
         * consistent state wrt the writebuffer.  This also ensures that
         * any write-allocated cache lines in the vector page are written
         * back.  After this point, we can start to touch devices again.
         */
        local_flush_tlb_all();
        flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
        pmd_t *pmd = pmd_off_k(PKMAP_BASE);
        pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
        BUG_ON(!pmd_none(*pmd) || !pte);
        __pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
        pkmap_page_table = pte + PTRS_PER_PTE;
#endif
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
        void *zero_page;

        build_mem_type_table();
        sanity_check_meminfo();
        prepare_page_table();
        bootmem_init();
        devicemaps_init(mdesc);
        kmap_init();

        top_pmd = pmd_off_k(0xffff0000);

        /*
         * allocate the zero page.  Note that this always succeeds and
         * returns a zeroed result.
         */
        zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
        empty_zero_page = virt_to_page(zero_page);
        flush_dcache_page(empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
        unsigned long base_pmdval;
        pgd_t *pgd;
        int i;

        if (current->mm && current->mm->pgd)
                pgd = current->mm->pgd;
        else
                pgd = init_mm.pgd;

        base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
        if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
                base_pmdval |= PMD_BIT4;

        for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
                unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
                pmd_t *pmd;

                pmd = pmd_off(pgd, i << PGDIR_SHIFT);
                pmd[0] = __pmd(pmdval);
                pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
                flush_pmd_entry(pmd);
        }
}