linux/arch/powerpc/mm/hash_utils_64.c
   1/*
   2 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
   3 *   {mikejc|engebret}@us.ibm.com
   4 *
   5 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
   6 *
   7 * SMP scalability work:
   8 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
   9 * 
  10 *    Module name: htab.c
  11 *
  12 *    Description:
  13 *      PowerPC Hashed Page Table functions
  14 *
  15 * This program is free software; you can redistribute it and/or
  16 * modify it under the terms of the GNU General Public License
  17 * as published by the Free Software Foundation; either version
  18 * 2 of the License, or (at your option) any later version.
  19 */
  20
  21#undef DEBUG
  22#undef DEBUG_LOW
  23
  24#include <linux/spinlock.h>
  25#include <linux/errno.h>
  26#include <linux/sched.h>
  27#include <linux/proc_fs.h>
  28#include <linux/stat.h>
  29#include <linux/sysctl.h>
  30#include <linux/export.h>
  31#include <linux/ctype.h>
  32#include <linux/cache.h>
  33#include <linux/init.h>
  34#include <linux/signal.h>
  35#include <linux/memblock.h>
  36#include <linux/context_tracking.h>
  37
  38#include <asm/processor.h>
  39#include <asm/pgtable.h>
  40#include <asm/mmu.h>
  41#include <asm/mmu_context.h>
  42#include <asm/page.h>
  43#include <asm/types.h>
  44#include <asm/uaccess.h>
  45#include <asm/machdep.h>
  46#include <asm/prom.h>
  47#include <asm/tlbflush.h>
  48#include <asm/io.h>
  49#include <asm/eeh.h>
  50#include <asm/tlb.h>
  51#include <asm/cacheflush.h>
  52#include <asm/cputable.h>
  53#include <asm/sections.h>
  54#include <asm/copro.h>
  55#include <asm/udbg.h>
  56#include <asm/code-patching.h>
  57#include <asm/fadump.h>
  58#include <asm/firmware.h>
  59#include <asm/tm.h>
  60#include <asm/trace.h>
  61
  62#ifdef DEBUG
  63#define DBG(fmt...) udbg_printf(fmt)
  64#else
  65#define DBG(fmt...)
  66#endif
  67
  68#ifdef DEBUG_LOW
  69#define DBG_LOW(fmt...) udbg_printf(fmt)
  70#else
  71#define DBG_LOW(fmt...)
  72#endif
  73
  74#define KB (1024)
  75#define MB (1024*KB)
  76#define GB (1024L*MB)
  77
  78/*
  79 * Note:  pte   --> Linux PTE
  80 *        HPTE  --> PowerPC Hashed Page Table Entry
  81 *
  82 * Execution context:
  83 *   htab_initialize is called with the MMU off (of course), but
  84 *   the kernel has been copied down to zero so it can directly
  85 *   reference global data.  At this point it is very difficult
  86 *   to print debug info.
  87 *
  88 */
  89
  90#ifdef CONFIG_U3_DART
  91extern unsigned long dart_tablebase;
  92#endif /* CONFIG_U3_DART */
  93
  94static unsigned long _SDR1;
  95struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
  96EXPORT_SYMBOL_GPL(mmu_psize_defs);
  97
  98struct hash_pte *htab_address;
  99unsigned long htab_size_bytes;
 100unsigned long htab_hash_mask;
 101EXPORT_SYMBOL_GPL(htab_hash_mask);
 102int mmu_linear_psize = MMU_PAGE_4K;
 103EXPORT_SYMBOL_GPL(mmu_linear_psize);
 104int mmu_virtual_psize = MMU_PAGE_4K;
 105int mmu_vmalloc_psize = MMU_PAGE_4K;
 106#ifdef CONFIG_SPARSEMEM_VMEMMAP
 107int mmu_vmemmap_psize = MMU_PAGE_4K;
 108#endif
 109int mmu_io_psize = MMU_PAGE_4K;
 110int mmu_kernel_ssize = MMU_SEGSIZE_256M;
 111EXPORT_SYMBOL_GPL(mmu_kernel_ssize);
 112int mmu_highuser_ssize = MMU_SEGSIZE_256M;
 113u16 mmu_slb_size = 64;
 114EXPORT_SYMBOL_GPL(mmu_slb_size);
 115#ifdef CONFIG_PPC_64K_PAGES
 116int mmu_ci_restrictions;
 117#endif
 118#ifdef CONFIG_DEBUG_PAGEALLOC
 119static u8 *linear_map_hash_slots;
 120static unsigned long linear_map_hash_count;
 121static DEFINE_SPINLOCK(linear_map_hash_lock);
 122#endif /* CONFIG_DEBUG_PAGEALLOC */
 123
  124/* These are the page size definitions to be used when none are
  125 * provided by the firmware.
 126 */
 127
 128/* Pre-POWER4 CPUs (4k pages only)
 129 */
 130static struct mmu_psize_def mmu_psize_defaults_old[] = {
 131        [MMU_PAGE_4K] = {
 132                .shift  = 12,
 133                .sllp   = 0,
 134                .penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
 135                .avpnm  = 0,
 136                .tlbiel = 0,
 137        },
 138};
 139
 140/* POWER4, GPUL, POWER5
 141 *
  142 * Support for 16MB large pages
 143 */
 144static struct mmu_psize_def mmu_psize_defaults_gp[] = {
 145        [MMU_PAGE_4K] = {
 146                .shift  = 12,
 147                .sllp   = 0,
 148                .penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
 149                .avpnm  = 0,
 150                .tlbiel = 1,
 151        },
 152        [MMU_PAGE_16M] = {
 153                .shift  = 24,
 154                .sllp   = SLB_VSID_L,
 155                .penc   = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0,
 156                            [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 },
 157                .avpnm  = 0x1UL,
 158                .tlbiel = 0,
 159        },
 160};
 161
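/*
 * Convert Linux PTE protection flags into the flags expected in the
 * second doubleword (the "R" word) of an HPTE: set no-execute for
 * non-executable pages, the extra PP bit for read-only user pages,
 * and always set the C (changed) and M (memory coherence) bits.
 */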
 162static unsigned long htab_convert_pte_flags(unsigned long pteflags)
 163{
 164        unsigned long rflags = pteflags & 0x1fa;
 165
 166        /* _PAGE_EXEC -> NOEXEC */
 167        if ((pteflags & _PAGE_EXEC) == 0)
 168                rflags |= HPTE_R_N;
 169
 170        /* PP bits. PAGE_USER is already PP bit 0x2, so we only
 171         * need to add in 0x1 if it's a read-only user page
 172         */
 173        if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
 174                                         (pteflags & _PAGE_DIRTY)))
 175                rflags |= 1;
 176        /*
 177         * Always add "C" bit for perf. Memory coherence is always enabled
 178         */
 179        return rflags | HPTE_R_C | HPTE_R_M;
 180}
 181
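/*
 * Create bolted HPTEs mapping the virtual range vstart..vend to the
 * physical range starting at pstart, using the given protection, page
 * size and segment size. Returns 0 on success, or a negative value if
 * an HPTE insertion fails.
 */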
 182int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 183                      unsigned long pstart, unsigned long prot,
 184                      int psize, int ssize)
 185{
 186        unsigned long vaddr, paddr;
 187        unsigned int step, shift;
 188        int ret = 0;
 189
 190        shift = mmu_psize_defs[psize].shift;
 191        step = 1 << shift;
 192
 193        prot = htab_convert_pte_flags(prot);
 194
 195        DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
 196            vstart, vend, pstart, prot, psize, ssize);
 197
 198        for (vaddr = vstart, paddr = pstart; vaddr < vend;
 199             vaddr += step, paddr += step) {
 200                unsigned long hash, hpteg;
 201                unsigned long vsid = get_kernel_vsid(vaddr, ssize);
 202                unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
 203                unsigned long tprot = prot;
 204
 205                /*
  206                 * If we hit a bad address, return an error.
 207                 */
 208                if (!vsid)
 209                        return -1;
 210                /* Make kernel text executable */
 211                if (overlaps_kernel_text(vaddr, vaddr + step))
 212                        tprot &= ~HPTE_R_N;
 213
 214                /* Make kvm guest trampolines executable */
 215                if (overlaps_kvm_tmp(vaddr, vaddr + step))
 216                        tprot &= ~HPTE_R_N;
 217
 218                /*
  219                 * If the kernel is relocatable (e.g. the kdump case), the
  220                 * interrupt vectors are copied down to real address 0, so
  221                 * check whether this mapping overlaps them and, if so, mark
  222                 * the region executable. This is needed because on POWER8
  223                 * systems with the relocation-on-exception feature enabled,
  224                 * exceptions are delivered with the MMU on (IR=DR=1); hence,
  225                 * to execute the interrupt handlers in virtual mode, the
  226                 * vector region needs to be marked executable.
 227                 */
 228                if ((PHYSICAL_START > MEMORY_START) &&
 229                        overlaps_interrupt_vector_text(vaddr, vaddr + step))
 230                                tprot &= ~HPTE_R_N;
 231
 232                hash = hpt_hash(vpn, shift, ssize);
 233                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 234
 235                BUG_ON(!ppc_md.hpte_insert);
 236                ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
 237                                         HPTE_V_BOLTED, psize, psize, ssize);
 238
 239                if (ret < 0)
 240                        break;
 241#ifdef CONFIG_DEBUG_PAGEALLOC
 242                if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
 243                        linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
 244#endif /* CONFIG_DEBUG_PAGEALLOC */
 245        }
 246        return ret < 0 ? ret : 0;
 247}
 248
 249#ifdef CONFIG_MEMORY_HOTPLUG
 250int htab_remove_mapping(unsigned long vstart, unsigned long vend,
 251                      int psize, int ssize)
 252{
 253        unsigned long vaddr;
 254        unsigned int step, shift;
 255
 256        shift = mmu_psize_defs[psize].shift;
 257        step = 1 << shift;
 258
 259        if (!ppc_md.hpte_removebolted) {
 260                printk(KERN_WARNING "Platform doesn't implement "
 261                                "hpte_removebolted\n");
 262                return -EINVAL;
 263        }
 264
 265        for (vaddr = vstart; vaddr < vend; vaddr += step)
 266                ppc_md.hpte_removebolted(vaddr, psize, ssize);
 267
 268        return 0;
 269}
 270#endif /* CONFIG_MEMORY_HOTPLUG */
 271
 272static int __init htab_dt_scan_seg_sizes(unsigned long node,
 273                                         const char *uname, int depth,
 274                                         void *data)
 275{
 276        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 277        const __be32 *prop;
 278        int size = 0;
 279
 280        /* We are scanning "cpu" nodes only */
 281        if (type == NULL || strcmp(type, "cpu") != 0)
 282                return 0;
 283
 284        prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
 285        if (prop == NULL)
 286                return 0;
 287        for (; size >= 4; size -= 4, ++prop) {
 288                if (be32_to_cpu(prop[0]) == 40) {
 289                        DBG("1T segment support detected\n");
 290                        cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
 291                        return 1;
 292                }
 293        }
 294        cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
 295        return 0;
 296}
 297
 298static void __init htab_init_seg_sizes(void)
 299{
 300        of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
 301}
 302
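/*
 * Map a page size shift from the device tree (e.g. 0xc = 4K, 0x10 = 64K)
 * to the corresponding MMU_PAGE_* index, or -1 if the size is unknown.
 */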
 303static int __init get_idx_from_shift(unsigned int shift)
 304{
 305        int idx = -1;
 306
 307        switch (shift) {
 308        case 0xc:
 309                idx = MMU_PAGE_4K;
 310                break;
 311        case 0x10:
 312                idx = MMU_PAGE_64K;
 313                break;
 314        case 0x14:
 315                idx = MMU_PAGE_1M;
 316                break;
 317        case 0x18:
 318                idx = MMU_PAGE_16M;
 319                break;
 320        case 0x22:
 321                idx = MMU_PAGE_16G;
 322                break;
 323        }
 324        return idx;
 325}
 326
 327static int __init htab_dt_scan_page_sizes(unsigned long node,
 328                                          const char *uname, int depth,
 329                                          void *data)
 330{
 331        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 332        const __be32 *prop;
 333        int size = 0;
 334
 335        /* We are scanning "cpu" nodes only */
 336        if (type == NULL || strcmp(type, "cpu") != 0)
 337                return 0;
 338
 339        prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
 340        if (!prop)
 341                return 0;
 342
 343        pr_info("Page sizes from device-tree:\n");
 344        size /= 4;
 345        cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
 346        while(size > 0) {
 347                unsigned int base_shift = be32_to_cpu(prop[0]);
 348                unsigned int slbenc = be32_to_cpu(prop[1]);
 349                unsigned int lpnum = be32_to_cpu(prop[2]);
 350                struct mmu_psize_def *def;
 351                int idx, base_idx;
 352
 353                size -= 3; prop += 3;
 354                base_idx = get_idx_from_shift(base_shift);
 355                if (base_idx < 0) {
 356                        /* skip the pte encoding also */
 357                        prop += lpnum * 2; size -= lpnum * 2;
 358                        continue;
 359                }
 360                def = &mmu_psize_defs[base_idx];
 361                if (base_idx == MMU_PAGE_16M)
 362                        cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;
 363
 364                def->shift = base_shift;
 365                if (base_shift <= 23)
 366                        def->avpnm = 0;
 367                else
 368                        def->avpnm = (1 << (base_shift - 23)) - 1;
 369                def->sllp = slbenc;
 370                /*
 371                 * We don't know for sure what's up with tlbiel, so
 372                 * for now we only set it for 4K and 64K pages
 373                 */
 374                if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
 375                        def->tlbiel = 1;
 376                else
 377                        def->tlbiel = 0;
 378
 379                while (size > 0 && lpnum) {
 380                        unsigned int shift = be32_to_cpu(prop[0]);
 381                        int penc  = be32_to_cpu(prop[1]);
 382
 383                        prop += 2; size -= 2;
 384                        lpnum--;
 385
 386                        idx = get_idx_from_shift(shift);
 387                        if (idx < 0)
 388                                continue;
 389
 390                        if (penc == -1)
 391                                pr_err("Invalid penc for base_shift=%d "
 392                                       "shift=%d\n", base_shift, shift);
 393
 394                        def->penc[idx] = penc;
 395                        pr_info("base_shift=%d: shift=%d, sllp=0x%04lx,"
 396                                " avpnm=0x%08lx, tlbiel=%d, penc=%d\n",
 397                                base_shift, shift, def->sllp,
 398                                def->avpnm, def->tlbiel, def->penc[idx]);
 399                }
 400        }
 401
 402        return 1;
 403}
 404
 405#ifdef CONFIG_HUGETLB_PAGE
 406/* Scan for 16G memory blocks that have been set aside for huge pages
 407 * and reserve those blocks for 16G huge pages.
 408 */
 409static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
 410                                        const char *uname, int depth,
 411                                        void *data) {
 412        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 413        const __be64 *addr_prop;
 414        const __be32 *page_count_prop;
 415        unsigned int expected_pages;
 416        long unsigned int phys_addr;
 417        long unsigned int block_size;
 418
 419        /* We are scanning "memory" nodes only */
 420        if (type == NULL || strcmp(type, "memory") != 0)
 421                return 0;
 422
 423        /* This property is the log base 2 of the number of virtual pages that
 424         * will represent this memory block. */
 425        page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
 426        if (page_count_prop == NULL)
 427                return 0;
 428        expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
 429        addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
 430        if (addr_prop == NULL)
 431                return 0;
 432        phys_addr = be64_to_cpu(addr_prop[0]);
 433        block_size = be64_to_cpu(addr_prop[1]);
 434        if (block_size != (16 * GB))
 435                return 0;
 436        printk(KERN_INFO "Huge page(16GB) memory: "
 437                        "addr = 0x%lX size = 0x%lX pages = %d\n",
 438                        phys_addr, block_size, expected_pages);
 439        if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
 440                memblock_reserve(phys_addr, block_size * expected_pages);
 441                add_gpage(phys_addr, block_size, expected_pages);
 442        }
 443        return 0;
 444}
 445#endif /* CONFIG_HUGETLB_PAGE */
 446
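/*
 * Mark every (base page size, actual page size) penc encoding as invalid
 * (-1) before the device tree scan fills in the real encodings.
 */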
 447static void mmu_psize_set_default_penc(void)
 448{
 449        int bpsize, apsize;
 450        for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
 451                for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++)
 452                        mmu_psize_defs[bpsize].penc[apsize] = -1;
 453}
 454
 455#ifdef CONFIG_PPC_64K_PAGES
 456
 457static bool might_have_hea(void)
 458{
 459        /*
 460         * The HEA ethernet adapter requires awareness of the
 461         * GX bus. Without that awareness we can easily assume
 462         * we will never see an HEA ethernet device.
 463         */
 464#ifdef CONFIG_IBMEBUS
 465        return !cpu_has_feature(CPU_FTR_ARCH_207S);
 466#else
 467        return false;
 468#endif
 469}
 470
 471#endif /* #ifdef CONFIG_PPC_64K_PAGES */
 472
 473static void __init htab_init_page_sizes(void)
 474{
 475        int rc;
 476
  477        /* set the invalid penc to -1 */
 478        mmu_psize_set_default_penc();
 479
 480        /* Default to 4K pages only */
 481        memcpy(mmu_psize_defs, mmu_psize_defaults_old,
 482               sizeof(mmu_psize_defaults_old));
 483
 484        /*
 485         * Try to find the available page sizes in the device-tree
 486         */
 487        rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
 488        if (rc != 0)  /* Found */
 489                goto found;
 490
 491        /*
  492         * Not in the device-tree, so fall back on the known size
  493         * list for 16M-capable GP & GR
 494         */
 495        if (mmu_has_feature(MMU_FTR_16M_PAGE))
 496                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 497                       sizeof(mmu_psize_defaults_gp));
 498 found:
 499#ifndef CONFIG_DEBUG_PAGEALLOC
 500        /*
 501         * Pick a size for the linear mapping. Currently, we only support
 502         * 16M, 1M and 4K which is the default
 503         */
 504        if (mmu_psize_defs[MMU_PAGE_16M].shift)
 505                mmu_linear_psize = MMU_PAGE_16M;
 506        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
 507                mmu_linear_psize = MMU_PAGE_1M;
 508#endif /* CONFIG_DEBUG_PAGEALLOC */
 509
 510#ifdef CONFIG_PPC_64K_PAGES
 511        /*
 512         * Pick a size for the ordinary pages. Default is 4K, we support
 513         * 64K for user mappings and vmalloc if supported by the processor.
 514         * We only use 64k for ioremap if the processor
 515         * (and firmware) support cache-inhibited large pages.
 516         * If not, we use 4k and set mmu_ci_restrictions so that
 517         * hash_page knows to switch processes that use cache-inhibited
 518         * mappings to 4k pages.
 519         */
 520        if (mmu_psize_defs[MMU_PAGE_64K].shift) {
 521                mmu_virtual_psize = MMU_PAGE_64K;
 522                mmu_vmalloc_psize = MMU_PAGE_64K;
 523                if (mmu_linear_psize == MMU_PAGE_4K)
 524                        mmu_linear_psize = MMU_PAGE_64K;
 525                if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
 526                        /*
 527                         * When running on pSeries using 64k pages for ioremap
 528                         * would stop us accessing the HEA ethernet. So if we
 529                         * have the chance of ever seeing one, stay at 4k.
 530                         */
 531                        if (!might_have_hea() || !machine_is(pseries))
 532                                mmu_io_psize = MMU_PAGE_64K;
 533                } else
 534                        mmu_ci_restrictions = 1;
 535        }
 536#endif /* CONFIG_PPC_64K_PAGES */
 537
 538#ifdef CONFIG_SPARSEMEM_VMEMMAP
 539        /* We try to use 16M pages for vmemmap if that is supported
 540         * and we have at least 1G of RAM at boot
 541         */
 542        if (mmu_psize_defs[MMU_PAGE_16M].shift &&
 543            memblock_phys_mem_size() >= 0x40000000)
 544                mmu_vmemmap_psize = MMU_PAGE_16M;
 545        else if (mmu_psize_defs[MMU_PAGE_64K].shift)
 546                mmu_vmemmap_psize = MMU_PAGE_64K;
 547        else
 548                mmu_vmemmap_psize = MMU_PAGE_4K;
 549#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 550
 551        printk(KERN_DEBUG "Page orders: linear mapping = %d, "
 552               "virtual = %d, io = %d"
 553#ifdef CONFIG_SPARSEMEM_VMEMMAP
 554               ", vmemmap = %d"
 555#endif
 556               "\n",
 557               mmu_psize_defs[mmu_linear_psize].shift,
 558               mmu_psize_defs[mmu_virtual_psize].shift,
 559               mmu_psize_defs[mmu_io_psize].shift
 560#ifdef CONFIG_SPARSEMEM_VMEMMAP
 561               ,mmu_psize_defs[mmu_vmemmap_psize].shift
 562#endif
 563               );
 564
 565#ifdef CONFIG_HUGETLB_PAGE
 566        /* Reserve 16G huge page memory sections for huge pages */
 567        of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
 568#endif /* CONFIG_HUGETLB_PAGE */
 569}
 570
 571static int __init htab_dt_scan_pftsize(unsigned long node,
 572                                       const char *uname, int depth,
 573                                       void *data)
 574{
 575        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 576        const __be32 *prop;
 577
 578        /* We are scanning "cpu" nodes only */
 579        if (type == NULL || strcmp(type, "cpu") != 0)
 580                return 0;
 581
 582        prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
 583        if (prop != NULL) {
 584                /* pft_size[0] is the NUMA CEC cookie */
 585                ppc64_pft_size = be32_to_cpu(prop[1]);
 586                return 1;
 587        }
 588        return 0;
 589}
 590
 591static unsigned long __init htab_get_table_size(void)
 592{
 593        unsigned long mem_size, rnd_mem_size, pteg_count, psize;
 594
  595        /* If the hash table size isn't already provided by the platform, we
  596         * try to retrieve it from the device-tree. If it's not there either,
  597         * we calculate it now based on the total RAM size
 598         */
 599        if (ppc64_pft_size == 0)
 600                of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
 601        if (ppc64_pft_size)
 602                return 1UL << ppc64_pft_size;
 603
 604        /* round mem_size up to next power of 2 */
 605        mem_size = memblock_phys_mem_size();
 606        rnd_mem_size = 1UL << __ilog2(mem_size);
 607        if (rnd_mem_size < mem_size)
 608                rnd_mem_size <<= 1;
 609
 610        /* # pages / 2 */
 611        psize = mmu_psize_defs[mmu_virtual_psize].shift;
 612        pteg_count = max(rnd_mem_size >> (psize + 1), 1UL << 11);
 613
 614        return pteg_count << 7;
 615}
 616
 617#ifdef CONFIG_MEMORY_HOTPLUG
 618int create_section_mapping(unsigned long start, unsigned long end)
 619{
 620        return htab_bolt_mapping(start, end, __pa(start),
 621                                 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
 622                                 mmu_kernel_ssize);
 623}
 624
 625int remove_section_mapping(unsigned long start, unsigned long end)
 626{
 627        return htab_remove_mapping(start, end, mmu_linear_psize,
 628                        mmu_kernel_ssize);
 629}
 630#endif /* CONFIG_MEMORY_HOTPLUG */
 631
 632extern u32 htab_call_hpte_insert1[];
 633extern u32 htab_call_hpte_insert2[];
 634extern u32 htab_call_hpte_remove[];
 635extern u32 htab_call_hpte_updatepp[];
 636extern u32 ht64_call_hpte_insert1[];
 637extern u32 ht64_call_hpte_insert2[];
 638extern u32 ht64_call_hpte_remove[];
 639extern u32 ht64_call_hpte_updatepp[];
 640
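/*
 * Patch the out-of-line call sites in the low-level hash assembly so they
 * branch directly to the platform's hpte_insert/hpte_remove/hpte_updatepp
 * implementations.
 */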
 641static void __init htab_finish_init(void)
 642{
 643#ifdef CONFIG_PPC_HAS_HASH_64K
 644        patch_branch(ht64_call_hpte_insert1,
 645                ppc_function_entry(ppc_md.hpte_insert),
 646                BRANCH_SET_LINK);
 647        patch_branch(ht64_call_hpte_insert2,
 648                ppc_function_entry(ppc_md.hpte_insert),
 649                BRANCH_SET_LINK);
 650        patch_branch(ht64_call_hpte_remove,
 651                ppc_function_entry(ppc_md.hpte_remove),
 652                BRANCH_SET_LINK);
 653        patch_branch(ht64_call_hpte_updatepp,
 654                ppc_function_entry(ppc_md.hpte_updatepp),
 655                BRANCH_SET_LINK);
 656#endif /* CONFIG_PPC_HAS_HASH_64K */
 657
 658        patch_branch(htab_call_hpte_insert1,
 659                ppc_function_entry(ppc_md.hpte_insert),
 660                BRANCH_SET_LINK);
 661        patch_branch(htab_call_hpte_insert2,
 662                ppc_function_entry(ppc_md.hpte_insert),
 663                BRANCH_SET_LINK);
 664        patch_branch(htab_call_hpte_remove,
 665                ppc_function_entry(ppc_md.hpte_remove),
 666                BRANCH_SET_LINK);
 667        patch_branch(htab_call_hpte_updatepp,
 668                ppc_function_entry(ppc_md.hpte_updatepp),
 669                BRANCH_SET_LINK);
 670}
 671
 672static void __init htab_initialize(void)
 673{
 674        unsigned long table;
 675        unsigned long pteg_count;
 676        unsigned long prot;
 677        unsigned long base = 0, size = 0, limit;
 678        struct memblock_region *reg;
 679
 680        DBG(" -> htab_initialize()\n");
 681
 682        /* Initialize segment sizes */
 683        htab_init_seg_sizes();
 684
 685        /* Initialize page sizes */
 686        htab_init_page_sizes();
 687
 688        if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
 689                mmu_kernel_ssize = MMU_SEGSIZE_1T;
 690                mmu_highuser_ssize = MMU_SEGSIZE_1T;
 691                printk(KERN_INFO "Using 1TB segments\n");
 692        }
 693
 694        /*
 695         * Calculate the required size of the htab.  We want the number of
 696         * PTEGs to equal one half the number of real pages.
 697         */ 
 698        htab_size_bytes = htab_get_table_size();
 699        pteg_count = htab_size_bytes >> 7;
 700
 701        htab_hash_mask = pteg_count - 1;
 702
 703        if (firmware_has_feature(FW_FEATURE_LPAR)) {
 704                /* Using a hypervisor which owns the htab */
 705                htab_address = NULL;
 706                _SDR1 = 0; 
 707#ifdef CONFIG_FA_DUMP
 708                /*
  709                 * If firmware-assisted dump is active, firmware preserves
  710                 * the contents of the htab along with the entire partition
  711                 * memory. In that case, clear the htab so that we don't end
  712                 * up using old mappings.
 713                 */
 714                if (is_fadump_active() && ppc_md.hpte_clear_all)
 715                        ppc_md.hpte_clear_all();
 716#endif
 717        } else {
 718                /* Find storage for the HPT.  Must be contiguous in
 719                 * the absolute address space. On cell we want it to be
 720                 * in the first 2 Gig so we can use it for IOMMU hacks.
 721                 */
 722                if (machine_is(cell))
 723                        limit = 0x80000000;
 724                else
 725                        limit = MEMBLOCK_ALLOC_ANYWHERE;
 726
 727                table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);
 728
 729                DBG("Hash table allocated at %lx, size: %lx\n", table,
 730                    htab_size_bytes);
 731
 732                htab_address = __va(table);
 733
 734                /* htab absolute addr + encoded htabsize */
 735                _SDR1 = table + __ilog2(pteg_count) - 11;
 736
 737                /* Initialize the HPT with no entries */
 738                memset((void *)table, 0, htab_size_bytes);
 739
 740                /* Set SDR1 */
 741                mtspr(SPRN_SDR1, _SDR1);
 742        }
 743
 744        prot = pgprot_val(PAGE_KERNEL);
 745
 746#ifdef CONFIG_DEBUG_PAGEALLOC
 747        linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 748        linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
 749                                                    1, ppc64_rma_size));
 750        memset(linear_map_hash_slots, 0, linear_map_hash_count);
 751#endif /* CONFIG_DEBUG_PAGEALLOC */
 752
  753        /* On U3-based machines, we need to reserve the DART area and
  754         * _NOT_ map it, to avoid cache paradoxes as it's remapped
  755         * non-cacheable later on
 756         */
 757
  758        /* Create the bolted linear mapping in the hash table */
 759        for_each_memblock(memory, reg) {
 760                base = (unsigned long)__va(reg->base);
 761                size = reg->size;
 762
 763                DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
 764                    base, size, prot);
 765
 766#ifdef CONFIG_U3_DART
 767                /* Do not map the DART space. Fortunately, it will be aligned
 768                 * in such a way that it will not cross two memblock regions and
  769                 * will fit within a single 16MB page.
  770                 * The DART space is assumed to be a full 16MB region even if
  771                 * we only use 2MB of that space. We will use more of it later
  772                 * for AGP GART. We have to use a full 16MB large page.
 773                 */
 774                DBG("DART base: %lx\n", dart_tablebase);
 775
 776                if (dart_tablebase != 0 && dart_tablebase >= base
 777                    && dart_tablebase < (base + size)) {
 778                        unsigned long dart_table_end = dart_tablebase + 16 * MB;
 779                        if (base != dart_tablebase)
 780                                BUG_ON(htab_bolt_mapping(base, dart_tablebase,
 781                                                        __pa(base), prot,
 782                                                        mmu_linear_psize,
 783                                                        mmu_kernel_ssize));
 784                        if ((base + size) > dart_table_end)
 785                                BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
 786                                                        base + size,
 787                                                        __pa(dart_table_end),
 788                                                         prot,
 789                                                         mmu_linear_psize,
 790                                                         mmu_kernel_ssize));
 791                        continue;
 792                }
 793#endif /* CONFIG_U3_DART */
 794                BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
 795                                prot, mmu_linear_psize, mmu_kernel_ssize));
 796        }
 797        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 798
 799        /*
 800         * If we have a memory_limit and we've allocated TCEs then we need to
 801         * explicitly map the TCE area at the top of RAM. We also cope with the
 802         * case that the TCEs start below memory_limit.
 803         * tce_alloc_start/end are 16MB aligned so the mapping should work
 804         * for either 4K or 16MB pages.
 805         */
 806        if (tce_alloc_start) {
 807                tce_alloc_start = (unsigned long)__va(tce_alloc_start);
 808                tce_alloc_end = (unsigned long)__va(tce_alloc_end);
 809
 810                if (base + size >= tce_alloc_start)
 811                        tce_alloc_start = base + size + 1;
 812
 813                BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
 814                                         __pa(tce_alloc_start), prot,
 815                                         mmu_linear_psize, mmu_kernel_ssize));
 816        }
 817
 818        htab_finish_init();
 819
 820        DBG(" <- htab_initialize()\n");
 821}
 822#undef KB
 823#undef MB
 824
 825void __init early_init_mmu(void)
 826{
 827        /* Initialize the MMU Hash table and create the linear mapping
 828         * of memory. Has to be done before SLB initialization as this is
 829         * currently where the page size encoding is obtained.
 830         */
 831        htab_initialize();
 832
 833        /* Initialize SLB management */
 834        slb_initialize();
 835}
 836
 837#ifdef CONFIG_SMP
 838void early_init_mmu_secondary(void)
 839{
 840        /* Initialize hash table for that CPU */
 841        if (!firmware_has_feature(FW_FEATURE_LPAR))
 842                mtspr(SPRN_SDR1, _SDR1);
 843
 844        /* Initialize SLB */
 845        slb_initialize();
 846}
 847#endif /* CONFIG_SMP */
 848
 849/*
 850 * Called by asm hashtable.S for doing lazy icache flush
 851 */
 852unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 853{
 854        struct page *page;
 855
 856        if (!pfn_valid(pte_pfn(pte)))
 857                return pp;
 858
 859        page = pte_page(pte);
 860
  861        /* icache may not yet be coherent with the dcache for this page */
 862        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
 863                if (trap == 0x400) {
 864                        flush_dcache_icache_page(page);
 865                        set_bit(PG_arch_1, &page->flags);
 866                } else
 867                        pp |= HPTE_R_N;
 868        }
 869        return pp;
 870}
 871
 872#ifdef CONFIG_PPC_MM_SLICES
 873static unsigned int get_paca_psize(unsigned long addr)
 874{
 875        u64 lpsizes;
 876        unsigned char *hpsizes;
 877        unsigned long index, mask_index;
 878
 879        if (addr < SLICE_LOW_TOP) {
 880                lpsizes = get_paca()->context.low_slices_psize;
 881                index = GET_LOW_SLICE_INDEX(addr);
 882                return (lpsizes >> (index * 4)) & 0xF;
 883        }
 884        hpsizes = get_paca()->context.high_slices_psize;
 885        index = GET_HIGH_SLICE_INDEX(addr);
 886        mask_index = index & 0x1;
 887        return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
 888}
 889
 890#else
 891unsigned int get_paca_psize(unsigned long addr)
 892{
 893        return get_paca()->context.user_psize;
 894}
 895#endif
 896
 897/*
 898 * Demote a segment to using 4k pages.
 899 * For now this makes the whole process use 4k pages.
 900 */
 901#ifdef CONFIG_PPC_64K_PAGES
 902void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
 903{
 904        if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
 905                return;
 906        slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
 907        copro_flush_all_slbs(mm);
 908        if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
 909                get_paca()->context = mm->context;
 910                slb_flush_and_rebolt();
 911        }
 912}
 913#endif /* CONFIG_PPC_64K_PAGES */
 914
 915#ifdef CONFIG_PPC_SUBPAGE_PROT
 916/*
 917 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 918 * Userspace sets the subpage permissions using the subpage_prot system call.
 919 *
 920 * Result is 0: full permissions, _PAGE_RW: read-only,
 921 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
 922 */
 923static int subpage_protection(struct mm_struct *mm, unsigned long ea)
 924{
 925        struct subpage_prot_table *spt = &mm->context.spt;
 926        u32 spp = 0;
 927        u32 **sbpm, *sbpp;
 928
 929        if (ea >= spt->maxaddr)
 930                return 0;
 931        if (ea < 0x100000000UL) {
 932                /* addresses below 4GB use spt->low_prot */
 933                sbpm = spt->low_prot;
 934        } else {
 935                sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
 936                if (!sbpm)
 937                        return 0;
 938        }
 939        sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
 940        if (!sbpp)
 941                return 0;
 942        spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
 943
 944        /* extract 2-bit bitfield for this 4k subpage */
 945        spp >>= 30 - 2 * ((ea >> 12) & 0xf);
 946
 947        /* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */
 948        spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0);
 949        return spp;
 950}
 951
 952#else /* CONFIG_PPC_SUBPAGE_PROT */
 953static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
 954{
 955        return 0;
 956}
 957#endif
 958
 959void hash_failure_debug(unsigned long ea, unsigned long access,
 960                        unsigned long vsid, unsigned long trap,
 961                        int ssize, int psize, int lpsize, unsigned long pte)
 962{
 963        if (!printk_ratelimit())
 964                return;
 965        pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
 966                ea, access, current->comm);
 967        pr_info("    trap=0x%lx vsid=0x%lx ssize=%d base psize=%d psize %d pte=0x%lx\n",
 968                trap, vsid, ssize, psize, lpsize, pte);
 969}
 970
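/*
 * Make sure the page size information cached in the PACA matches what
 * this mm is now using; if it doesn't, update it and rebuild the
 * relevant SLB entries.
 */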
 971static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
 972                             int psize, bool user_region)
 973{
 974        if (user_region) {
 975                if (psize != get_paca_psize(ea)) {
 976                        get_paca()->context = mm->context;
 977                        slb_flush_and_rebolt();
 978                }
 979        } else if (get_paca()->vmalloc_sllp !=
 980                   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
 981                get_paca()->vmalloc_sllp =
 982                        mmu_psize_defs[mmu_vmalloc_psize].sllp;
 983                slb_vmalloc_update();
 984        }
 985}
 986
 987/* Result code is:
 988 *  0 - handled
 989 *  1 - normal page fault
 990 * -1 - critical hash insertion error
 991 * -2 - access not permitted by subpage protection mechanism
 992 */
 993int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 994                 unsigned long access, unsigned long trap,
 995                 unsigned long flags)
 996{
 997        enum ctx_state prev_state = exception_enter();
 998        pgd_t *pgdir;
 999        unsigned long vsid;
1000        pte_t *ptep;
1001        unsigned hugeshift;
1002        const struct cpumask *tmp;
1003        int rc, user_region = 0;
1004        int psize, ssize;
1005
1006        DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
1007                ea, access, trap);
1008        trace_hash_fault(ea, access, trap);
1009
1010        /* Get region & vsid */
1011        switch (REGION_ID(ea)) {
1012        case USER_REGION_ID:
1013                user_region = 1;
1014                if (! mm) {
1015                        DBG_LOW(" user region with no mm !\n");
1016                        rc = 1;
1017                        goto bail;
1018                }
1019                psize = get_slice_psize(mm, ea);
1020                ssize = user_segment_size(ea);
1021                vsid = get_vsid(mm->context.id, ea, ssize);
1022                break;
1023        case VMALLOC_REGION_ID:
1024                vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
1025                if (ea < VMALLOC_END)
1026                        psize = mmu_vmalloc_psize;
1027                else
1028                        psize = mmu_io_psize;
1029                ssize = mmu_kernel_ssize;
1030                break;
1031        default:
1032                /* Not a valid range
1033                 * Send the problem up to do_page_fault 
1034                 */
1035                rc = 1;
1036                goto bail;
1037        }
1038        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
1039
1040        /* Bad address. */
1041        if (!vsid) {
1042                DBG_LOW("Bad address!\n");
1043                rc = 1;
1044                goto bail;
1045        }
1046        /* Get pgdir */
1047        pgdir = mm->pgd;
1048        if (pgdir == NULL) {
1049                rc = 1;
1050                goto bail;
1051        }
1052
1053        /* Check CPU locality */
1054        tmp = cpumask_of(smp_processor_id());
1055        if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
1056                flags |= HPTE_LOCAL_UPDATE;
1057
1058#ifndef CONFIG_PPC_64K_PAGES
1059        /* If we use 4K pages and our psize is not 4K, then we might
1060         * be hitting a special driver mapping, and need to align the
1061         * address before we fetch the PTE.
1062         *
1063         * It could also be a hugepage mapping, in which case this is
1064         * not necessary, but it's not harmful, either.
1065         */
1066        if (psize != MMU_PAGE_4K)
1067                ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
1068#endif /* CONFIG_PPC_64K_PAGES */
1069
1070        /* Get PTE and page size from page tables */
1071        ptep = __find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
1072        if (ptep == NULL || !pte_present(*ptep)) {
1073                DBG_LOW(" no PTE !\n");
1074                rc = 1;
1075                goto bail;
1076        }
1077
1078        /* Add _PAGE_PRESENT to the required access perm */
1079        access |= _PAGE_PRESENT;
1080
1081        /* Pre-check access permissions (will be re-checked atomically
 1082         * in __hash_page_XX but this pre-check is a fast path)
1083         */
1084        if (access & ~pte_val(*ptep)) {
1085                DBG_LOW(" no access !\n");
1086                rc = 1;
1087                goto bail;
1088        }
1089
1090        if (hugeshift) {
1091                if (pmd_trans_huge(*(pmd_t *)ptep))
1092                        rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
1093                                             trap, flags, ssize, psize);
1094#ifdef CONFIG_HUGETLB_PAGE
1095                else
1096                        rc = __hash_page_huge(ea, access, vsid, ptep, trap,
1097                                              flags, ssize, hugeshift, psize);
1098#else
1099                else {
1100                        /*
 1101                         * If we have a hugeshift and this is not a transparent
 1102                         * hugepage, then with hugetlb disabled something is really wrong.
1103                         */
1104                        rc = 1;
1105                        WARN_ON(1);
1106                }
1107#endif
1108                if (current->mm == mm)
1109                        check_paca_psize(ea, mm, psize, user_region);
1110
1111                goto bail;
1112        }
1113
1114#ifndef CONFIG_PPC_64K_PAGES
1115        DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
1116#else
1117        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
1118                pte_val(*(ptep + PTRS_PER_PTE)));
1119#endif
1120        /* Do actual hashing */
1121#ifdef CONFIG_PPC_64K_PAGES
1122        /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
1123        if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
1124                demote_segment_4k(mm, ea);
1125                psize = MMU_PAGE_4K;
1126        }
1127
1128        /* If this PTE is non-cacheable and we have restrictions on
 1129         * using non-cacheable large pages, then we switch to 4k
1130         */
1131        if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
1132            (pte_val(*ptep) & _PAGE_NO_CACHE)) {
1133                if (user_region) {
1134                        demote_segment_4k(mm, ea);
1135                        psize = MMU_PAGE_4K;
1136                } else if (ea < VMALLOC_END) {
1137                        /*
1138                         * some driver did a non-cacheable mapping
1139                         * in vmalloc space, so switch vmalloc
1140                         * to 4k pages
1141                         */
1142                        printk(KERN_ALERT "Reducing vmalloc segment "
1143                               "to 4kB pages because of "
1144                               "non-cacheable mapping\n");
1145                        psize = mmu_vmalloc_psize = MMU_PAGE_4K;
1146                        copro_flush_all_slbs(mm);
1147                }
1148        }
1149
1150        if (current->mm == mm)
1151                check_paca_psize(ea, mm, psize, user_region);
1152#endif /* CONFIG_PPC_64K_PAGES */
1153
1154#ifdef CONFIG_PPC_HAS_HASH_64K
1155        if (psize == MMU_PAGE_64K)
1156                rc = __hash_page_64K(ea, access, vsid, ptep, trap,
1157                                     flags, ssize);
1158        else
1159#endif /* CONFIG_PPC_HAS_HASH_64K */
1160        {
1161                int spp = subpage_protection(mm, ea);
1162                if (access & spp)
1163                        rc = -2;
1164                else
1165                        rc = __hash_page_4K(ea, access, vsid, ptep, trap,
1166                                            flags, ssize, spp);
1167        }
1168
 1169        /* Dump some info in case of hash insertion failure; such failures
 1170         * should never happen, so it is really useful to know if/when they do
1171         */
1172        if (rc == -1)
1173                hash_failure_debug(ea, access, vsid, trap, ssize, psize,
1174                                   psize, pte_val(*ptep));
1175#ifndef CONFIG_PPC_64K_PAGES
1176        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
1177#else
1178        DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
1179                pte_val(*(ptep + PTRS_PER_PTE)));
1180#endif
1181        DBG_LOW(" -> rc=%d\n", rc);
1182
1183bail:
1184        exception_exit(prev_state);
1185        return rc;
1186}
1187EXPORT_SYMBOL_GPL(hash_page_mm);
1188
1189int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
1190              unsigned long dsisr)
1191{
1192        unsigned long flags = 0;
1193        struct mm_struct *mm = current->mm;
1194
1195        if (REGION_ID(ea) == VMALLOC_REGION_ID)
1196                mm = &init_mm;
1197
1198        if (dsisr & DSISR_NOHPTE)
1199                flags |= HPTE_NOHPTE_UPDATE;
1200
1201        return hash_page_mm(mm, ea, access, trap, flags);
1202}
1203EXPORT_SYMBOL_GPL(hash_page);
1204
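/*
 * Opportunistically pre-install a hash PTE for a user address that is
 * expected to be accessed soon, so that the access does not have to go
 * through the full hash_page() fault path.
 */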
1205void hash_preload(struct mm_struct *mm, unsigned long ea,
1206                  unsigned long access, unsigned long trap)
1207{
1208        int hugepage_shift;
1209        unsigned long vsid;
1210        pgd_t *pgdir;
1211        pte_t *ptep;
1212        unsigned long flags;
1213        int rc, ssize, update_flags = 0;
1214
1215        BUG_ON(REGION_ID(ea) != USER_REGION_ID);
1216
1217#ifdef CONFIG_PPC_MM_SLICES
1218        /* We only prefault standard pages for now */
1219        if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
1220                return;
1221#endif
1222
1223        DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
1224                " trap=%lx\n", mm, mm->pgd, ea, access, trap);
1225
1226        /* Get Linux PTE if available */
1227        pgdir = mm->pgd;
1228        if (pgdir == NULL)
1229                return;
1230
1231        /* Get VSID */
1232        ssize = user_segment_size(ea);
1233        vsid = get_vsid(mm->context.id, ea, ssize);
1234        if (!vsid)
1235                return;
1236        /*
 1237         * The hash code doesn't like irqs. Walking the Linux page tables with
 1238         * irqs disabled saves us from holding multiple locks.
1239         */
1240        local_irq_save(flags);
1241
1242        /*
 1243         * THP pages use update_mmu_cache_pmd. We don't do hash
 1244         * preload there, hence we can ignore THP here.
1245         */
1246        ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift);
1247        if (!ptep)
1248                goto out_exit;
1249
1250        WARN_ON(hugepage_shift);
1251#ifdef CONFIG_PPC_64K_PAGES
1252        /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
1253         * a 64K kernel), then we don't preload, hash_page() will take
1254         * care of it once we actually try to access the page.
1255         * That way we don't have to duplicate all of the logic for segment
1256         * page size demotion here
1257         */
1258        if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
1259                goto out_exit;
1260#endif /* CONFIG_PPC_64K_PAGES */
1261
 1262        /* Is it local to this CPU? */
1263        if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
1264                update_flags |= HPTE_LOCAL_UPDATE;
1265
1266        /* Hash it in */
1267#ifdef CONFIG_PPC_HAS_HASH_64K
1268        if (mm->context.user_psize == MMU_PAGE_64K)
1269                rc = __hash_page_64K(ea, access, vsid, ptep, trap,
1270                                     update_flags, ssize);
1271        else
1272#endif /* CONFIG_PPC_HAS_HASH_64K */
1273                rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
1274                                    ssize, subpage_protection(mm, ea));
1275
 1276        /* Dump some info in case of hash insertion failure; such failures
 1277         * should never happen, so it is really useful to know if/when they do
1278         */
1279        if (rc == -1)
1280                hash_failure_debug(ea, access, vsid, trap, ssize,
1281                                   mm->context.user_psize,
1282                                   mm->context.user_psize,
1283                                   pte_val(*ptep));
1284out_exit:
1285        local_irq_restore(flags);
1286}
1287
 1288/* WARNING: This is called from hash_low_64.S; if you change this prototype,
 1289 *          do not forget to update the assembly call site!
1290 */
1291void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
1292                     unsigned long flags)
1293{
1294        unsigned long hash, index, shift, hidx, slot;
1295        int local = flags & HPTE_LOCAL_UPDATE;
1296
1297        DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
1298        pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
1299                hash = hpt_hash(vpn, shift, ssize);
1300                hidx = __rpte_to_hidx(pte, index);
1301                if (hidx & _PTEIDX_SECONDARY)
1302                        hash = ~hash;
1303                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1304                slot += hidx & _PTEIDX_GROUP_IX;
1305                DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
1306                /*
 1307                 * We use the same base page size and actual psize because we
 1308                 * don't use these functions for hugepages.
1309                 */
1310                ppc_md.hpte_invalidate(slot, vpn, psize, psize, ssize, local);
1311        } pte_iterate_hashed_end();
1312
1313#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1314        /* Transactions are not aborted by tlbiel, only tlbie.
 1315         * Without an abort, syncing a page back to a block device w/ PIO could pick up
1316         * transactional data (bad!) so we force an abort here.  Before the
1317         * sync the page will be made read-only, which will flush_hash_page.
1318         * BIG ISSUE here: if the kernel uses a page from userspace without
1319         * unmapping it first, it may see the speculated version.
1320         */
1321        if (local && cpu_has_feature(CPU_FTR_TM) &&
1322            current->thread.regs &&
1323            MSR_TM_ACTIVE(current->thread.regs->msr)) {
1324                tm_enable();
1325                tm_abort(TM_CAUSE_TLBI);
1326        }
1327#endif
1328}
1329
1330#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1331void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
1332                         pmd_t *pmdp, unsigned int psize, int ssize,
1333                         unsigned long flags)
1334{
1335        int i, max_hpte_count, valid;
1336        unsigned long s_addr;
1337        unsigned char *hpte_slot_array;
1338        unsigned long hidx, shift, vpn, hash, slot;
1339        int local = flags & HPTE_LOCAL_UPDATE;
1340
1341        s_addr = addr & HPAGE_PMD_MASK;
1342        hpte_slot_array = get_hpte_slot_array(pmdp);
1343        /*
 1344         * If we try to do a huge PTE update after a pgtable withdraw has
 1345         * been done, we will find hpte_slot_array below to be NULL. This
 1346         * happens when we do split_huge_page_pmd().
1347         */
1348        if (!hpte_slot_array)
1349                return;
1350
1351        if (ppc_md.hugepage_invalidate) {
1352                ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
1353                                           psize, ssize, local);
1354                goto tm_abort;
1355        }
1356        /*
 1357         * No bulk HPTE removal support, invalidate each entry
1358         */
1359        shift = mmu_psize_defs[psize].shift;
1360        max_hpte_count = HPAGE_PMD_SIZE >> shift;
1361        for (i = 0; i < max_hpte_count; i++) {
1362                /*
 1363                 * 8 bits per hpte entry:
 1364                 * 000 | [ secondary group (one bit) | hidx (3 bits) | valid bit ]
1365                 */
1366                valid = hpte_valid(hpte_slot_array, i);
1367                if (!valid)
1368                        continue;
1369                hidx =  hpte_hash_index(hpte_slot_array, i);
1370
1371                /* get the vpn */
1372                addr = s_addr + (i * (1ul << shift));
1373                vpn = hpt_vpn(addr, vsid, ssize);
1374                hash = hpt_hash(vpn, shift, ssize);
1375                if (hidx & _PTEIDX_SECONDARY)
1376                        hash = ~hash;
1377
1378                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1379                slot += hidx & _PTEIDX_GROUP_IX;
1380                ppc_md.hpte_invalidate(slot, vpn, psize,
1381                                       MMU_PAGE_16M, ssize, local);
1382        }
1383tm_abort:
1384#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1385        /* Transactions are not aborted by tlbiel, only tlbie.
 1386         * Without an abort, syncing a page back to a block device w/ PIO could pick up
1387         * transactional data (bad!) so we force an abort here.  Before the
1388         * sync the page will be made read-only, which will flush_hash_page.
1389         * BIG ISSUE here: if the kernel uses a page from userspace without
1390         * unmapping it first, it may see the speculated version.
1391         */
1392        if (local && cpu_has_feature(CPU_FTR_TM) &&
1393            current->thread.regs &&
1394            MSR_TM_ACTIVE(current->thread.regs->msr)) {
1395                tm_enable();
1396                tm_abort(TM_CAUSE_TLBI);
1397        }
1398#endif
1399        return;
1400}
1401#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1402
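/*
 * Flush a batch of hash PTEs: use the platform's optimized
 * flush_hash_range() if it provides one, otherwise invalidate this
 * CPU's TLB batch one entry at a time.
 */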
1403void flush_hash_range(unsigned long number, int local)
1404{
1405        if (ppc_md.flush_hash_range)
1406                ppc_md.flush_hash_range(number, local);
1407        else {
1408                int i;
1409                struct ppc64_tlb_batch *batch =
1410                        this_cpu_ptr(&ppc64_tlb_batch);
1411
1412                for (i = 0; i < number; i++)
1413                        flush_hash_page(batch->vpn[i], batch->pte[i],
1414                                        batch->psize, batch->ssize, local);
1415        }
1416}
1417
1418/*
 1419 * low_hash_fault is called when the low-level hash code failed
 1420 * to insert a PTE due to a hypervisor error
1421 */
1422void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
1423{
1424        enum ctx_state prev_state = exception_enter();
1425
1426        if (user_mode(regs)) {
1427#ifdef CONFIG_PPC_SUBPAGE_PROT
1428                if (rc == -2)
1429                        _exception(SIGSEGV, regs, SEGV_ACCERR, address);
1430                else
1431#endif
1432                        _exception(SIGBUS, regs, BUS_ADRERR, address);
1433        } else
1434                bad_page_fault(regs, address, SIGBUS);
1435
1436        exception_exit(prev_state);
1437}
1438
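/*
 * Insert an HPTE, trying the primary hash group first and then the
 * secondary; if both groups are full, evict an entry from a
 * pseudo-randomly chosen group and try again.
 */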
1439long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
1440                           unsigned long pa, unsigned long rflags,
1441                           unsigned long vflags, int psize, int ssize)
1442{
1443        unsigned long hpte_group;
1444        long slot;
1445
1446repeat:
1447        hpte_group = ((hash & htab_hash_mask) *
1448                       HPTES_PER_GROUP) & ~0x7UL;
1449
1450        /* Insert into the hash table, primary slot */
1451        slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
1452                                  psize, psize, ssize);
1453
1454        /* Primary is full, try the secondary */
1455        if (unlikely(slot == -1)) {
1456                hpte_group = ((~hash & htab_hash_mask) *
1457                              HPTES_PER_GROUP) & ~0x7UL;
1458                slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
1459                                          vflags | HPTE_V_SECONDARY,
1460                                          psize, psize, ssize);
1461                if (slot == -1) {
1462                        if (mftb() & 0x1)
1463                                hpte_group = ((hash & htab_hash_mask) *
1464                                              HPTES_PER_GROUP)&~0x7UL;
1465
1466                        ppc_md.hpte_remove(hpte_group);
1467                        goto repeat;
1468                }
1469        }
1470
1471        return slot;
1472}
1473
1474#ifdef CONFIG_DEBUG_PAGEALLOC
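/*
 * For DEBUG_PAGEALLOC: insert a bolted HPTE for a single page of the
 * linear mapping and record which slot it went into in
 * linear_map_hash_slots so it can be invalidated later.
 */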
1475static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
1476{
1477        unsigned long hash;
1478        unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
1479        unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
1480        unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
1481        long ret;
1482
1483        hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
1484
1485        /* Don't create HPTE entries for bad address */
1486        if (!vsid)
1487                return;
1488
1489        ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
1490                                    HPTE_V_BOLTED,
1491                                    mmu_linear_psize, mmu_kernel_ssize);
1492
1493        BUG_ON (ret < 0);
1494        spin_lock(&linear_map_hash_lock);
1495        BUG_ON(linear_map_hash_slots[lmi] & 0x80);
1496        linear_map_hash_slots[lmi] = ret | 0x80;
1497        spin_unlock(&linear_map_hash_lock);
1498}
1499
1500static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
1501{
1502        unsigned long hash, hidx, slot;
1503        unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
1504        unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
1505
1506        hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
1507        spin_lock(&linear_map_hash_lock);
1508        BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
1509        hidx = linear_map_hash_slots[lmi] & 0x7f;
1510        linear_map_hash_slots[lmi] = 0;
1511        spin_unlock(&linear_map_hash_lock);
1512        if (hidx & _PTEIDX_SECONDARY)
1513                hash = ~hash;
1514        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1515        slot += hidx & _PTEIDX_GROUP_IX;
1516        ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize,
1517                               mmu_kernel_ssize, 0);
1518}
1519
1520void __kernel_map_pages(struct page *page, int numpages, int enable)
1521{
1522        unsigned long flags, vaddr, lmi;
1523        int i;
1524
1525        local_irq_save(flags);
1526        for (i = 0; i < numpages; i++, page++) {
1527                vaddr = (unsigned long)page_address(page);
1528                lmi = __pa(vaddr) >> PAGE_SHIFT;
1529                if (lmi >= linear_map_hash_count)
1530                        continue;
1531                if (enable)
1532                        kernel_map_linear_page(vaddr, lmi);
1533                else
1534                        kernel_unmap_linear_page(vaddr, lmi);
1535        }
1536        local_irq_restore(flags);
1537}
1538#endif /* CONFIG_DEBUG_PAGEALLOC */
1539
1540void setup_initial_memory_limit(phys_addr_t first_memblock_base,
1541                                phys_addr_t first_memblock_size)
1542{
1543        /* We don't currently support the first MEMBLOCK not mapping 0
1544         * physical on those processors
1545         */
1546        BUG_ON(first_memblock_base != 0);
1547
 1548        /* On LPAR systems, the first entry is our RMA region.
 1549         * Non-LPAR 64-bit hash MMU systems don't have a limitation
1550         * on real mode access, but using the first entry works well
1551         * enough. We also clamp it to 1G to avoid some funky things
1552         * such as RTAS bugs etc...
1553         */
1554        ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
1555
1556        /* Finally limit subsequent allocations */
1557        memblock_set_current_limit(ppc64_rma_size);
1558}
1559