linux/arch/powerpc/mm/tlb_nohash.c
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e. SW loaded TLBs or Book3E compliant processors;
 * this does -not- include the 603, however, which shares its implementation
 * with hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/hugetlb.h>
#include <asm/paca.h>

#include "mmu_decl.h"

/*
 * This struct lists the SW-supported page sizes.  The hardware MMU may support
 * other sizes not listed here.  The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#ifdef CONFIG_PPC_BOOK3E_MMU
#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .enc    = BOOK3E_PAGESZ_4K,
        },
        [MMU_PAGE_2M] = {
                .shift  = 21,
                .enc    = BOOK3E_PAGESZ_2M,
        },
        [MMU_PAGE_4M] = {
                .shift  = 22,
                .enc    = BOOK3E_PAGESZ_4M,
        },
        [MMU_PAGE_16M] = {
                .shift  = 24,
                .enc    = BOOK3E_PAGESZ_16M,
        },
        [MMU_PAGE_64M] = {
                .shift  = 26,
                .enc    = BOOK3E_PAGESZ_64M,
        },
        [MMU_PAGE_256M] = {
                .shift  = 28,
                .enc    = BOOK3E_PAGESZ_256M,
        },
        [MMU_PAGE_1G] = {
                .shift  = 30,
                .enc    = BOOK3E_PAGESZ_1GB,
        },
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .ind    = 20,
                .enc    = BOOK3E_PAGESZ_4K,
        },
        [MMU_PAGE_16K] = {
                .shift  = 14,
                .enc    = BOOK3E_PAGESZ_16K,
        },
        [MMU_PAGE_64K] = {
                .shift  = 16,
                .ind    = 28,
                .enc    = BOOK3E_PAGESZ_64K,
        },
        [MMU_PAGE_1M] = {
                .shift  = 20,
                .enc    = BOOK3E_PAGESZ_1M,
        },
        [MMU_PAGE_16M] = {
                .shift  = 24,
                .ind    = 36,
                .enc    = BOOK3E_PAGESZ_16M,
        },
        [MMU_PAGE_256M] = {
                .shift  = 28,
                .enc    = BOOK3E_PAGESZ_256M,
        },
        [MMU_PAGE_1G] = {
                .shift  = 30,
                .enc    = BOOK3E_PAGESZ_1GB,
        },
};
#endif /* CONFIG_PPC_FSL_BOOK3E */

static inline int mmu_get_tsize(int psize)
{
        return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
        /* This isn't used on !Book3E for now */
        return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU */

/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;           /* Page size used for the linear mapping */
int mmu_pte_psize;              /* Page size used for PTE pages */
int mmu_vmemmap_psize;          /* Page size used for the virtual mem map */
int book3e_htw_mode;            /* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;   /* Top of linear mapping */

/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned int pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbil_pid(pid);
        preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                            int tsize, int ind)
{
        unsigned int pid;

        preempt_disable();
        pid = mm ? mm->context.id : 0;
        if (pid != MMU_NO_CONTEXT)
                _tlbil_va(vmaddr, pid, tsize, ind);
        preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
                               mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

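/*
 * A flush can stay local when the mm has only ever been used on CPUs that
 * share this core's TLB (i.e. its cpumask is a subset of our sibling mask),
 * in which case no IPI or broadcast invalidation is needed.
 */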
static int mm_is_core_local(struct mm_struct *mm)
{
        return cpumask_subset(mm_cpumask(mm),
                              topology_sibling_cpumask(smp_processor_id()));
}

struct tlb_flush_param {
        unsigned long addr;
        unsigned int pid;
        unsigned int tsize;
        unsigned int ind;
};

static void do_flush_tlb_mm_ipi(void *param)
{
        struct tlb_flush_param *p = param;

        _tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
        struct tlb_flush_param *p = param;

        _tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}

/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        unsigned int pid;

        preempt_disable();
        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto no_context;
        if (!mm_is_core_local(mm)) {
                struct tlb_flush_param p = { .pid = pid };
                /* Ignores smp_processor_id() even if set. */
                smp_call_function_many(mm_cpumask(mm),
                                       do_flush_tlb_mm_ipi, &p, 1);
        }
        _tlbil_pid(pid);
 no_context:
        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                      int tsize, int ind)
{
        struct cpumask *cpu_mask;
        unsigned int pid;

        /*
         * This function as well as __local_flush_tlb_page() must only be called
         * for user contexts.
         */
        if (unlikely(WARN_ON(!mm)))
                return;

        preempt_disable();
        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto bail;
        cpu_mask = mm_cpumask(mm);
        if (!mm_is_core_local(mm)) {
                /* If broadcast tlbivax is supported, use it */
                if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
                        int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
                        if (lock)
                                raw_spin_lock(&tlbivax_lock);
                        _tlbivax_bcast(vmaddr, pid, tsize, ind);
                        if (lock)
                                raw_spin_unlock(&tlbivax_lock);
                        goto bail;
                } else {
                        struct tlb_flush_param p = {
                                .pid = pid,
                                .addr = vmaddr,
                                .tsize = tsize,
                                .ind = ind,
                        };
                        /* Ignores smp_processor_id() even if set in cpu_mask */
                        smp_call_function_many(cpu_mask,
                                               do_flush_tlb_page_ipi, &p, 1);
                }
        }
        _tlbil_va(vmaddr, pid, tsize, ind);
 bail:
        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (vma && is_vm_hugetlb_page(vma))
                flush_hugetlb_page(vma, vmaddr);
#endif

        __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
                         mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
        unsigned long root = of_get_flat_dt_root();
        if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
                mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

/*
 * Flush kernel TLB entries in the given range
 */
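/*
 * Note: there is no actual range tracking here; kernel mappings use PID 0,
 * so we simply invalidate all PID 0 entries on every CPU.
 */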
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
        preempt_disable();
        smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
        _tlbil_pid(0);
        preempt_enable();
#else
        _tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush.  This could
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync, but
 * for now we keep it that way.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

void tlb_flush(struct mmu_gather *tlb)
{
        flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
        int tsize = mmu_psize_defs[mmu_pte_psize].enc;

        if (book3e_htw_mode != PPC_HTW_NONE) {
                unsigned long start = address & PMD_MASK;
                unsigned long end = address + PMD_SIZE;
                unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

                /* This isn't optimal; ideally we would factor out the
                 * preempt & CPU mask mucking around, or even the IPI, but
                 * it will do for now
                 */
                while (start < end) {
                        __flush_tlb_page(tlb->mm, start, tsize, 1);
                        start += size;
                }
        } else {
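                /*
                 * No HW tablewalk: flush the virtual linear page table
                 * entry instead.  Build the address, within the VPTE
                 * region selected by "rid", of the page of PTEs that
                 * maps "address", then invalidate that single entry.
                 */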
                unsigned long rmask = 0xf000000000000000ul;
                unsigned long rid = (address & rmask) | 0x1000000000000000ul;
                unsigned long vpte = address & ~rmask;

#ifdef CONFIG_PPC_64K_PAGES
                vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
                vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
                vpte |= rid;
                __flush_tlb_page(tlb->mm, vpte, tsize, 0);
        }
}

static void setup_page_sizes(void)
{
        unsigned int tlb0cfg;
        unsigned int tlb0ps;
        unsigned int eptcfg;
        int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
        unsigned int mmucfg = mfspr(SPRN_MMUCFG);
        int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

        if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
                unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
                unsigned int min_pg, max_pg;

                min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
                max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

                for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                        struct mmu_psize_def *def;
                        unsigned int shift;

                        def = &mmu_psize_defs[psize];
                        shift = def->shift;

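                        /*
                         * TLBnCFG MIN/MAXSIZE encode sizes as powers of 4 KB,
                         * so sizes with an odd shift (e.g. 2M) cannot be
                         * expressed here and are skipped.
                         */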
                        if (shift == 0 || shift & 1)
                                continue;

                        /* adjust to be in terms of 4^shift Kb */
                        shift = (shift - 10) >> 1;

                        if ((shift >= min_pg) && (shift <= max_pg))
                                def->flags |= MMU_PAGE_SIZE_DIRECT;
                }

                goto out;
        }

        if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
                u32 tlb1cfg, tlb1ps;

                tlb0cfg = mfspr(SPRN_TLB0CFG);
                tlb1cfg = mfspr(SPRN_TLB1CFG);
                tlb1ps = mfspr(SPRN_TLB1PS);
                eptcfg = mfspr(SPRN_EPTCFG);

                if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
                        book3e_htw_mode = PPC_HTW_E6500;

                /*
                 * We expect 4K subpage size and unrestricted indirect size.
                 * The lack of a restriction on indirect size is a Freescale
                 * extension, indicated by PSn = 0 but SPSn != 0.
                 */
                if (eptcfg != 2)
                        book3e_htw_mode = PPC_HTW_NONE;

                for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                        struct mmu_psize_def *def = &mmu_psize_defs[psize];

                        if (tlb1ps & (1U << (def->shift - 10))) {
                                def->flags |= MMU_PAGE_SIZE_DIRECT;

                                if (book3e_htw_mode && psize == MMU_PAGE_2M)
                                        def->flags |= MMU_PAGE_SIZE_INDIRECT;
                        }
                }

                goto out;
        }
#endif

        tlb0cfg = mfspr(SPRN_TLB0CFG);
        tlb0ps = mfspr(SPRN_TLB0PS);
        eptcfg = mfspr(SPRN_EPTCFG);

        /* Look for supported direct sizes */
        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                struct mmu_psize_def *def = &mmu_psize_defs[psize];

                if (tlb0ps & (1U << (def->shift - 10)))
                        def->flags |= MMU_PAGE_SIZE_DIRECT;
        }

        /* Indirect page sizes supported ? */
        if ((tlb0cfg & TLBnCFG_IND) == 0 ||
            (tlb0cfg & TLBnCFG_PT) == 0)
                goto out;

        book3e_htw_mode = PPC_HTW_IBM;

        /* Now, we only deal with one IND page size for each
         * direct size. Hopefully all implementations today are
         * unambiguous, but we might want to be careful in the
         * future.
         */
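        /*
         * EPTCFG packs up to three (PS, SPS) pairs in consecutive 5-bit
         * fields: PS is the page size of an indirect entry and SPS the
         * sub-page size of the PTEs that entry maps.
         */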
        for (i = 0; i < 3; i++) {
                unsigned int ps, sps;

                sps = eptcfg & 0x1f;
                eptcfg >>= 5;
                ps = eptcfg & 0x1f;
                eptcfg >>= 5;
                if (!ps || !sps)
                        continue;
                for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
                        struct mmu_psize_def *def = &mmu_psize_defs[psize];

                        if (ps == (def->shift - 10))
                                def->flags |= MMU_PAGE_SIZE_INDIRECT;
                        if (sps == (def->shift - 10))
                                def->ind = ps + 10;
                }
        }

out:
        /* Cleanup array and print summary */
        pr_info("MMU: Supported page sizes\n");
        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                struct mmu_psize_def *def = &mmu_psize_defs[psize];
                const char *__page_type_names[] = {
                        "unsupported",
                        "direct",
                        "indirect",
                        "direct & indirect"
                };
                if (def->flags == 0) {
                        def->shift = 0;
                        continue;
                }
                pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
                        __page_type_names[def->flags & 0x3]);
        }
}

static void setup_mmu_htw(void)
{
        /*
         * If we want to use HW tablewalk, enable it by patching the TLB miss
         * handlers to branch to the one dedicated to it.
         */

        switch (book3e_htw_mode) {
        case PPC_HTW_IBM:
                patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
                patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
                break;
#ifdef CONFIG_PPC_FSL_BOOK3E
        case PPC_HTW_E6500:
                extlb_level_exc = EX_TLB_SIZE;
                patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
                patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
                break;
#endif
        }
        pr_info("MMU: Book3E HW tablewalk %s\n",
                book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
static void early_init_this_mmu(void)
{
        unsigned int mas4;

        /* Set MAS4 based on page table setting */

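        /* Default WIMGE for TLB miss reloads: 0x4 = M, i.e. memory coherence required */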
        mas4 = 0x4 << MAS4_WIMGED_SHIFT;
        switch (book3e_htw_mode) {
        case PPC_HTW_E6500:
                mas4 |= MAS4_INDD;
                mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
                mas4 |= MAS4_TLBSELD(1);
                mmu_pte_psize = MMU_PAGE_2M;
                break;

        case PPC_HTW_IBM:
                mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
                mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
                mmu_pte_psize = MMU_PAGE_256M;
#else
                mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
                mmu_pte_psize = MMU_PAGE_1M;
#endif
                break;

        case PPC_HTW_NONE:
#ifdef CONFIG_PPC_64K_PAGES
                mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
                mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
                mmu_pte_psize = mmu_virtual_psize;
                break;
        }
        mtspr(SPRN_MAS4, mas4);

#ifdef CONFIG_PPC_FSL_BOOK3E
        if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
                unsigned int num_cams;

                /* use a quarter of the TLBCAM for bolted linear map */
                num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
                linear_map_top = map_mem_in_cams(linear_map_top, num_cams);
        }
#endif

        /* A sync won't hurt us after mucking around with
         * the MMU configuration
         */
        mb();
}

static void __init early_init_mmu_global(void)
{
        /* XXX This will have to be decided at runtime, but right
         * now our boot and TLB miss code hard wires it. Ideally
         * we should find out a suitable page size and patch the
         * TLB miss code (either that or use the PACA to store
         * the value we want)
         */
        mmu_linear_psize = MMU_PAGE_1G;

        /* XXX This should be decided at runtime based on supported
         * page sizes in the TLB, but for now let's assume 16M is
         * always there and a good fit (which it probably is)
         *
         * Freescale booke only supports 4K pages in TLB0, so use that.
         */
        if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
                mmu_vmemmap_psize = MMU_PAGE_4K;
        else
                mmu_vmemmap_psize = MMU_PAGE_16M;

        /* XXX This code only checks for TLB 0 capabilities and doesn't
         *     check what page size combos are supported by the HW. It
         *     also doesn't handle the case where a separate array holds
         *     the IND entries from the array loaded by the PT.
         */
        /* Look for supported page sizes */
        setup_page_sizes();

        /* Look for HW tablewalk support */
        setup_mmu_htw();

#ifdef CONFIG_PPC_FSL_BOOK3E
        if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
                if (book3e_htw_mode == PPC_HTW_NONE) {
                        extlb_level_exc = EX_TLB_SIZE;
                        patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
                        patch_exception(0x1e0,
                                exc_instruction_tlb_miss_bolted_book3e);
                }
        }
#endif

        /* Set the global containing the top of the linear mapping
         * for use by the TLB miss code
         */
        linear_map_top = memblock_end_of_DRAM();
}

static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E
        if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
                /*
                 * Limit memory so we don't have linear faults.
                 * Unlike memblock_set_current_limit, which limits
                 * memory available during early boot, this permanently
                 * reduces the memory available to Linux.  We need to
                 * do this because highmem is not supported on 64-bit.
                 */
                memblock_enforce_memory_limit(linear_map_top);
        }
#endif

        memblock_set_current_limit(linear_map_top);
}

/* boot cpu only */
void __init early_init_mmu(void)
{
        early_init_mmu_global();
        early_init_this_mmu();
        early_mmu_set_memory_limit();
}

void early_init_mmu_secondary(void)
{
        early_init_this_mmu();
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                phys_addr_t first_memblock_size)
{
        /* On non-FSL Embedded 64-bit, we adjust the RMA size to match
         * the bolted TLB entry. We know for now that only 1G
         * entries are supported though that may eventually
         * change.
         *
         * On FSL Embedded 64-bit, we adjust the RMA size to match the
         * first bolted TLB entry size.  We still limit max to 1G even if
         * the TLB could cover more.  This is due to what the early init
         * code is set up to do.
         *
         * We crop it to the size of the first MEMBLOCK to
         * avoid going over total available memory just in case...
         */
#ifdef CONFIG_PPC_FSL_BOOK3E
        if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
                unsigned long linear_sz;
                linear_sz = calc_cam_sz(first_memblock_size, PAGE_OFFSET,
                                        first_memblock_base);
                ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
        } else
#endif
                ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

        /* Finally limit subsequent allocations */
        memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
        early_init_mmu_47x();
#endif
}
#endif /* CONFIG_PPC64 */