linux/arch/powerpc/mm/book3s64/radix_pgtable.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Page table handling routines for radix page table.
   4 *
   5 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
   6 */
   7
   8#define pr_fmt(fmt) "radix-mmu: " fmt
   9
  10#include <linux/io.h>
  11#include <linux/kernel.h>
  12#include <linux/sched/mm.h>
  13#include <linux/memblock.h>
  14#include <linux/of_fdt.h>
  15#include <linux/mm.h>
  16#include <linux/hugetlb.h>
  17#include <linux/string_helpers.h>
  18#include <linux/memory.h>
  19
  20#include <asm/pgalloc.h>
  21#include <asm/mmu_context.h>
  22#include <asm/dma.h>
  23#include <asm/machdep.h>
  24#include <asm/mmu.h>
  25#include <asm/firmware.h>
  26#include <asm/powernv.h>
  27#include <asm/sections.h>
  28#include <asm/smp.h>
  29#include <asm/trace.h>
  30#include <asm/uaccess.h>
  31#include <asm/ultravisor.h>
  32
  33#include <trace/events/thp.h>
  34
  35unsigned int mmu_pid_bits;
  36unsigned int mmu_base_pid;
  37unsigned long radix_mem_block_size __ro_after_init;
  38
  39static __ref void *early_alloc_pgtable(unsigned long size, int nid,
  40                        unsigned long region_start, unsigned long region_end)
  41{
  42        phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
  43        phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
  44        void *ptr;
  45
  46        if (region_start)
  47                min_addr = region_start;
  48        if (region_end)
  49                max_addr = region_end;
  50
  51        ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
  52
  53        if (!ptr)
  54                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
  55                      __func__, size, size, nid, &min_addr, &max_addr);
  56
  57        return ptr;
  58}
  59
  60/*
  61 * When allocating pud or pmd pointers, we allocate a complete page
  62 * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
  63 * is to ensure that the page obtained from the memblock allocator
   64 * can be completely used as a page table page and can be freed
  65 * correctly when the page table entries are removed.
  66 */
  67static int early_map_kernel_page(unsigned long ea, unsigned long pa,
  68                          pgprot_t flags,
  69                          unsigned int map_page_size,
  70                          int nid,
  71                          unsigned long region_start, unsigned long region_end)
  72{
  73        unsigned long pfn = pa >> PAGE_SHIFT;
  74        pgd_t *pgdp;
  75        p4d_t *p4dp;
  76        pud_t *pudp;
  77        pmd_t *pmdp;
  78        pte_t *ptep;
  79
  80        pgdp = pgd_offset_k(ea);
  81        p4dp = p4d_offset(pgdp, ea);
  82        if (p4d_none(*p4dp)) {
  83                pudp = early_alloc_pgtable(PAGE_SIZE, nid,
  84                                           region_start, region_end);
  85                p4d_populate(&init_mm, p4dp, pudp);
  86        }
  87        pudp = pud_offset(p4dp, ea);
  88        if (map_page_size == PUD_SIZE) {
  89                ptep = (pte_t *)pudp;
  90                goto set_the_pte;
  91        }
  92        if (pud_none(*pudp)) {
  93                pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
  94                                           region_end);
  95                pud_populate(&init_mm, pudp, pmdp);
  96        }
  97        pmdp = pmd_offset(pudp, ea);
  98        if (map_page_size == PMD_SIZE) {
  99                ptep = pmdp_ptep(pmdp);
 100                goto set_the_pte;
 101        }
 102        if (!pmd_present(*pmdp)) {
 103                ptep = early_alloc_pgtable(PAGE_SIZE, nid,
 104                                                region_start, region_end);
 105                pmd_populate_kernel(&init_mm, pmdp, ptep);
 106        }
 107        ptep = pte_offset_kernel(pmdp, ea);
 108
 109set_the_pte:
 110        set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
 111        smp_wmb();
 112        return 0;
 113}
 114
 115/*
 116 * nid, region_start, and region_end are hints to try to place the page
 117 * table memory in the same node or region.
 118 */
 119static int __map_kernel_page(unsigned long ea, unsigned long pa,
 120                          pgprot_t flags,
 121                          unsigned int map_page_size,
 122                          int nid,
 123                          unsigned long region_start, unsigned long region_end)
 124{
 125        unsigned long pfn = pa >> PAGE_SHIFT;
 126        pgd_t *pgdp;
 127        p4d_t *p4dp;
 128        pud_t *pudp;
 129        pmd_t *pmdp;
 130        pte_t *ptep;
 131        /*
  132         * Make sure the task size is correct as per the maximum address
 133         */
 134        BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
 135
 136#ifdef CONFIG_PPC_64K_PAGES
 137        BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
 138#endif
 139
 140        if (unlikely(!slab_is_available()))
 141                return early_map_kernel_page(ea, pa, flags, map_page_size,
 142                                                nid, region_start, region_end);
 143
 144        /*
 145         * Should make page table allocation functions be able to take a
 146         * node, so we can place kernel page tables on the right nodes after
 147         * boot.
 148         */
 149        pgdp = pgd_offset_k(ea);
 150        p4dp = p4d_offset(pgdp, ea);
 151        pudp = pud_alloc(&init_mm, p4dp, ea);
 152        if (!pudp)
 153                return -ENOMEM;
 154        if (map_page_size == PUD_SIZE) {
 155                ptep = (pte_t *)pudp;
 156                goto set_the_pte;
 157        }
 158        pmdp = pmd_alloc(&init_mm, pudp, ea);
 159        if (!pmdp)
 160                return -ENOMEM;
 161        if (map_page_size == PMD_SIZE) {
 162                ptep = pmdp_ptep(pmdp);
 163                goto set_the_pte;
 164        }
 165        ptep = pte_alloc_kernel(pmdp, ea);
 166        if (!ptep)
 167                return -ENOMEM;
 168
 169set_the_pte:
 170        set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
 171        smp_wmb();
 172        return 0;
 173}
 174
 175int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 176                          pgprot_t flags,
 177                          unsigned int map_page_size)
 178{
 179        return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
 180}
 181
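/*
 * Illustrative sketch (not part of the original file): how a caller might
 * use radix__map_kernel_page() above to establish a single PAGE_SIZE
 * kernel mapping. The helper name and its arguments (example_map_one_page,
 * example_ea, example_pa) are hypothetical and exist only for the example.
 */
#if 0
static int __init example_map_one_page(unsigned long example_ea,
                                       unsigned long example_pa)
{
        /* Map one page at example_ea -> example_pa as normal kernel RW data. */
        return radix__map_kernel_page(example_ea, example_pa,
                                      PAGE_KERNEL, PAGE_SIZE);
}
#endif
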
 182#ifdef CONFIG_STRICT_KERNEL_RWX
 183void radix__change_memory_range(unsigned long start, unsigned long end,
 184                                unsigned long clear)
 185{
 186        unsigned long idx;
 187        pgd_t *pgdp;
 188        p4d_t *p4dp;
 189        pud_t *pudp;
 190        pmd_t *pmdp;
 191        pte_t *ptep;
 192
 193        start = ALIGN_DOWN(start, PAGE_SIZE);
 194        end = PAGE_ALIGN(end); // aligns up
 195
 196        pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
 197                 start, end, clear);
 198
 199        for (idx = start; idx < end; idx += PAGE_SIZE) {
 200                pgdp = pgd_offset_k(idx);
 201                p4dp = p4d_offset(pgdp, idx);
 202                pudp = pud_alloc(&init_mm, p4dp, idx);
 203                if (!pudp)
 204                        continue;
 205                if (pud_is_leaf(*pudp)) {
 206                        ptep = (pte_t *)pudp;
 207                        goto update_the_pte;
 208                }
 209                pmdp = pmd_alloc(&init_mm, pudp, idx);
 210                if (!pmdp)
 211                        continue;
 212                if (pmd_is_leaf(*pmdp)) {
 213                        ptep = pmdp_ptep(pmdp);
 214                        goto update_the_pte;
 215                }
 216                ptep = pte_alloc_kernel(pmdp, idx);
 217                if (!ptep)
 218                        continue;
 219update_the_pte:
 220                radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
 221        }
 222
 223        radix__flush_tlb_kernel_range(start, end);
 224}
 225
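/*
 * Illustrative sketch (not part of the original file): the 'clear' argument
 * above is a mask of PTE bits to remove from every page in the range, so a
 * hypothetical caller could strip both write and execute permission in one
 * pass. The function name below is made up for the example; the real users
 * are radix__mark_rodata_ro() and radix__mark_initmem_nx() that follow.
 */
#if 0
static void example_make_range_ro_nx(unsigned long start, unsigned long end)
{
        radix__change_memory_range(start, end, _PAGE_WRITE | _PAGE_EXEC);
}
#endif
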
 226void radix__mark_rodata_ro(void)
 227{
 228        unsigned long start, end;
 229
 230        start = (unsigned long)_stext;
 231        end = (unsigned long)__init_begin;
 232
 233        radix__change_memory_range(start, end, _PAGE_WRITE);
 234}
 235
 236void radix__mark_initmem_nx(void)
 237{
 238        unsigned long start = (unsigned long)__init_begin;
 239        unsigned long end = (unsigned long)__init_end;
 240
 241        radix__change_memory_range(start, end, _PAGE_EXEC);
 242}
 243#endif /* CONFIG_STRICT_KERNEL_RWX */
 244
 245static inline void __meminit
 246print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
 247{
 248        char buf[10];
 249
 250        if (end <= start)
 251                return;
 252
 253        string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));
 254
 255        pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
 256                exec ? " (exec)" : "");
 257}
 258
 259static unsigned long next_boundary(unsigned long addr, unsigned long end)
 260{
 261#ifdef CONFIG_STRICT_KERNEL_RWX
 262        if (addr < __pa_symbol(__init_begin))
 263                return __pa_symbol(__init_begin);
 264#endif
 265        return end;
 266}
 267
 268static int __meminit create_physical_mapping(unsigned long start,
 269                                             unsigned long end,
 270                                             unsigned long max_mapping_size,
 271                                             int nid, pgprot_t _prot)
 272{
 273        unsigned long vaddr, addr, mapping_size = 0;
 274        bool prev_exec, exec = false;
 275        pgprot_t prot;
 276        int psize;
 277
 278        start = ALIGN(start, PAGE_SIZE);
 279        end   = ALIGN_DOWN(end, PAGE_SIZE);
 280        for (addr = start; addr < end; addr += mapping_size) {
 281                unsigned long gap, previous_size;
 282                int rc;
 283
 284                gap = next_boundary(addr, end) - addr;
 285                if (gap > max_mapping_size)
 286                        gap = max_mapping_size;
 287                previous_size = mapping_size;
 288                prev_exec = exec;
 289
 290                if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
 291                    mmu_psize_defs[MMU_PAGE_1G].shift) {
 292                        mapping_size = PUD_SIZE;
 293                        psize = MMU_PAGE_1G;
 294                } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
 295                           mmu_psize_defs[MMU_PAGE_2M].shift) {
 296                        mapping_size = PMD_SIZE;
 297                        psize = MMU_PAGE_2M;
 298                } else {
 299                        mapping_size = PAGE_SIZE;
 300                        psize = mmu_virtual_psize;
 301                }
 302
 303                vaddr = (unsigned long)__va(addr);
 304
 305                if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
 306                    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
 307                        prot = PAGE_KERNEL_X;
 308                        exec = true;
 309                } else {
 310                        prot = _prot;
 311                        exec = false;
 312                }
 313
 314                if (mapping_size != previous_size || exec != prev_exec) {
 315                        print_mapping(start, addr, previous_size, prev_exec);
 316                        start = addr;
 317                }
 318
 319                rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
 320                if (rc)
 321                        return rc;
 322
 323                update_page_count(psize, 1);
 324        }
 325
 326        print_mapping(start, addr, mapping_size, exec);
 327        return 0;
 328}
 329
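/*
 * Illustrative sketch (not part of the original file): the page size
 * selection rule used by create_physical_mapping() above, written as a
 * stand-alone helper. example_pick_mapping_size() is a hypothetical name;
 * it takes an address and the distance to the next boundary and applies
 * the same alignment and mmu_psize_defs[] availability checks.
 */
#if 0
static unsigned long example_pick_mapping_size(unsigned long addr,
                                               unsigned long gap)
{
        if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
            mmu_psize_defs[MMU_PAGE_1G].shift)
                return PUD_SIZE;        /* 1G mapping */
        if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
            mmu_psize_defs[MMU_PAGE_2M].shift)
                return PMD_SIZE;        /* 2M mapping */
        return PAGE_SIZE;               /* base page size */
}
#endif
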
 330static void __init radix_init_pgtable(void)
 331{
 332        unsigned long rts_field;
 333        phys_addr_t start, end;
 334        u64 i;
 335
 336        /* We don't support slb for radix */
 337        mmu_slb_size = 0;
 338
 339        /*
 340         * Create the linear mapping
 341         */
 342        for_each_mem_range(i, &start, &end) {
 343                /*
  344                 * The memblock allocator is up at this point, so the
  345                 * page tables will be allocated within the range. No
  346                 * need for a node (which we don't have yet).
 347                 */
 348
 349                if (end >= RADIX_VMALLOC_START) {
 350                        pr_warn("Outside the supported range\n");
 351                        continue;
 352                }
 353
 354                WARN_ON(create_physical_mapping(start, end,
 355                                                radix_mem_block_size,
 356                                                -1, PAGE_KERNEL));
 357        }
 358
 359        /* Find out how many PID bits are supported */
 360        if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
 361                if (!mmu_pid_bits)
 362                        mmu_pid_bits = 20;
 363                mmu_base_pid = 1;
 364        } else if (cpu_has_feature(CPU_FTR_HVMODE)) {
 365                if (!mmu_pid_bits)
 366                        mmu_pid_bits = 20;
 367#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 368                /*
 369                 * When KVM is possible, we only use the top half of the
 370                 * PID space to avoid collisions between host and guest PIDs
 371                 * which can cause problems due to prefetch when exiting the
 372                 * guest with AIL=3
 373                 */
 374                mmu_base_pid = 1 << (mmu_pid_bits - 1);
 375#else
 376                mmu_base_pid = 1;
 377#endif
 378        } else {
 379                /* The guest uses the bottom half of the PID space */
 380                if (!mmu_pid_bits)
 381                        mmu_pid_bits = 19;
 382                mmu_base_pid = 1;
 383        }
 384
 385        /*
  386         * Allocate the partition table and the process table for the
 387         * host.
 388         */
 389        BUG_ON(PRTB_SIZE_SHIFT > 36);
 390        process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
 391        /*
 392         * Fill in the process table.
 393         */
 394        rts_field = radix__get_tree_size();
 395        process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
 396
 397        /*
 398         * The init_mm context is given the first available (non-zero) PID,
 399         * which is the "guard PID" and contains no page table. PIDR should
 400         * never be set to zero because that duplicates the kernel address
 401         * space at the 0x0... offset (quadrant 0)!
 402         *
 403         * An arbitrary PID that may later be allocated by the PID allocator
 404         * for userspace processes must not be used either, because that
 405         * would cause stale user mappings for that PID on CPUs outside of
 406         * the TLB invalidation scheme (because it won't be in mm_cpumask).
 407         *
 408         * So permanently carve out one PID for the purpose of a guard PID.
 409         */
 410        init_mm.context.id = mmu_base_pid;
 411        mmu_base_pid++;
 412}
 413
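/*
 * Worked example for the PID-space split above (illustrative, not part of
 * the original file): with mmu_pid_bits = 20, in HV mode with KVM possible
 * on a CPU with the prefetch bug, mmu_base_pid = 1 << 19 = 0x80000, so the
 * host allocates PIDs from the top half 0x80000..0xfffff while guests use
 * the bottom half, avoiding host/guest PID collisions.
 */
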
 414static void __init radix_init_partition_table(void)
 415{
 416        unsigned long rts_field, dw0, dw1;
 417
 418        mmu_partition_table_init();
 419        rts_field = radix__get_tree_size();
 420        dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
 421        dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
 422        mmu_partition_table_set_entry(0, dw0, dw1, false);
 423
 424        pr_info("Initializing Radix MMU\n");
 425}
 426
 427static int __init get_idx_from_shift(unsigned int shift)
 428{
 429        int idx = -1;
 430
 431        switch (shift) {
 432        case 0xc:
 433                idx = MMU_PAGE_4K;
 434                break;
 435        case 0x10:
 436                idx = MMU_PAGE_64K;
 437                break;
 438        case 0x15:
 439                idx = MMU_PAGE_2M;
 440                break;
 441        case 0x1e:
 442                idx = MMU_PAGE_1G;
 443                break;
 444        }
 445        return idx;
 446}
 447
 448static int __init radix_dt_scan_page_sizes(unsigned long node,
 449                                           const char *uname, int depth,
 450                                           void *data)
 451{
 452        int size = 0;
 453        int shift, idx;
 454        unsigned int ap;
 455        const __be32 *prop;
 456        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 457
 458        /* We are scanning "cpu" nodes only */
 459        if (type == NULL || strcmp(type, "cpu") != 0)
 460                return 0;
 461
 462        /* Find MMU PID size */
 463        prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
 464        if (prop && size == 4)
 465                mmu_pid_bits = be32_to_cpup(prop);
 466
 467        /* Grab page size encodings */
 468        prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
 469        if (!prop)
 470                return 0;
 471
 472        pr_info("Page sizes from device-tree:\n");
 473        for (; size >= 4; size -= 4, ++prop) {
 474
 475                struct mmu_psize_def *def;
 476
  477                 /* top 3 bits are the AP encoding */
 478                shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
 479                ap = be32_to_cpu(prop[0]) >> 29;
 480                pr_info("Page size shift = %d AP=0x%x\n", shift, ap);
 481
 482                idx = get_idx_from_shift(shift);
 483                if (idx < 0)
 484                        continue;
 485
 486                def = &mmu_psize_defs[idx];
 487                def->shift = shift;
 488                def->ap  = ap;
 489        }
 490
 491        /* needed ? */
 492        cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
 493        return 1;
 494}
 495
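/*
 * Illustrative sketch (not part of the original file): decoding one cell of
 * "ibm,processor-radix-AP-encodings" the same way the scan above does. The
 * value 0x20000015 used here is a typical 2M-page encoding (AP field in the
 * top 3 bits, page shift in the remaining bits); treat it as an example
 * value, not something this file guarantees.
 */
#if 0
static void __init example_decode_ap_cell(void)
{
        u32 cell = 0x20000015;                  /* example: 2M encoding     */
        int shift = cell & ~(0xe << 28);        /* 0x15 = 21, i.e. 2M pages */
        unsigned int ap = cell >> 29;           /* 0x1, the AP field        */

        pr_info("Page size shift = %d AP=0x%x\n", shift, ap);
}
#endif
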
 496#ifdef CONFIG_MEMORY_HOTPLUG
 497static int __init probe_memory_block_size(unsigned long node, const char *uname, int
 498                                          depth, void *data)
 499{
 500        unsigned long *mem_block_size = (unsigned long *)data;
 501        const __be32 *prop;
 502        int len;
 503
 504        if (depth != 1)
 505                return 0;
 506
 507        if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
 508                return 0;
 509
 510        prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
 511
 512        if (!prop || len < dt_root_size_cells * sizeof(__be32))
 513                /*
 514                 * Nothing in the device tree
 515                 */
 516                *mem_block_size = MIN_MEMORY_BLOCK_SIZE;
 517        else
 518                *mem_block_size = of_read_number(prop, dt_root_size_cells);
 519        return 1;
 520}
 521
 522static unsigned long radix_memory_block_size(void)
 523{
 524        unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
 525
 526        /*
  527         * The OPAL firmware feature is set by now, so it is safe
  528         * to test for it.
 529         */
 530        if (firmware_has_feature(FW_FEATURE_OPAL))
 531                mem_block_size = 1UL * 1024 * 1024 * 1024;
 532        else
 533                of_scan_flat_dt(probe_memory_block_size, &mem_block_size);
 534
 535        return mem_block_size;
 536}
 537
 538#else   /* CONFIG_MEMORY_HOTPLUG */
 539
 540static unsigned long radix_memory_block_size(void)
 541{
 542        return 1UL * 1024 * 1024 * 1024;
 543}
 544
 545#endif /* CONFIG_MEMORY_HOTPLUG */
 546
 547
 548void __init radix__early_init_devtree(void)
 549{
 550        int rc;
 551
 552        /*
 553         * Try to find the available page sizes in the device-tree
 554         */
 555        rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
 556        if (!rc) {
 557                /*
 558                 * No page size details found in device tree.
  559                 * Let's assume we have 4k and 64k page support.
 560                 */
 561                mmu_psize_defs[MMU_PAGE_4K].shift = 12;
 562                mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
 563
 564                mmu_psize_defs[MMU_PAGE_64K].shift = 16;
 565                mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
 566        }
 567
 568        /*
  569         * Max mapping size used when mapping pages. We don't use
  570         * ppc_md.memory_block_size() here because this gets called
  571         * early, before the machine has been probed. Also, the
  572         * pseries implementation only checks for ibm,lmb-size.
  573         * All hypervisors supporting radix expose that device
  574         * tree node.
 575         */
 576        radix_mem_block_size = radix_memory_block_size();
 577        return;
 578}
 579
 580static void radix_init_amor(void)
 581{
 582        /*
  583         * In HV mode, we init AMOR (Authority Mask Override Register) so that
  584         * the hypervisor and guest can set up the IAMR (Instruction Authority Mask
  585         * Register), enable key 0 and set it to 1.
  586         *
  587         * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
  588         */
 589        mtspr(SPRN_AMOR, (3ul << 62));
 590}
 591
 592#ifdef CONFIG_PPC_KUEP
 593void setup_kuep(bool disabled)
 594{
 595        if (disabled || !early_radix_enabled())
 596                return;
 597
 598        if (smp_processor_id() == boot_cpuid) {
 599                pr_info("Activating Kernel Userspace Execution Prevention\n");
 600                cur_cpu_spec->mmu_features |= MMU_FTR_KUEP;
 601        }
 602
 603        /*
 604         * Radix always uses key0 of the IAMR to determine if an access is
 605         * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
 606         * fetch.
 607         */
 608        mtspr(SPRN_IAMR, (1ul << 62));
 609}
 610#endif
 611
 612#ifdef CONFIG_PPC_KUAP
 613void setup_kuap(bool disabled)
 614{
 615        if (disabled || !early_radix_enabled())
 616                return;
 617
 618        if (smp_processor_id() == boot_cpuid) {
 619                pr_info("Activating Kernel Userspace Access Prevention\n");
 620                cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP;
 621        }
 622
 623        /* Make sure userspace can't change the AMR */
 624        mtspr(SPRN_UAMOR, 0);
 625
 626        /*
 627         * Set the default kernel AMR values on all cpus.
 628         */
 629        mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
 630        isync();
 631}
 632#endif
 633
 634void __init radix__early_init_mmu(void)
 635{
 636        unsigned long lpcr;
 637
 638#ifdef CONFIG_PPC_64K_PAGES
 639        /* PAGE_SIZE mappings */
 640        mmu_virtual_psize = MMU_PAGE_64K;
 641#else
 642        mmu_virtual_psize = MMU_PAGE_4K;
 643#endif
 644
 645#ifdef CONFIG_SPARSEMEM_VMEMMAP
 646        /* vmemmap mapping */
 647        if (mmu_psize_defs[MMU_PAGE_2M].shift) {
 648                /*
 649                 * map vmemmap using 2M if available
 650                 */
 651                mmu_vmemmap_psize = MMU_PAGE_2M;
 652        } else
 653                mmu_vmemmap_psize = mmu_virtual_psize;
 654#endif
 655        /*
 656         * initialize page table size
 657         */
 658        __pte_index_size = RADIX_PTE_INDEX_SIZE;
 659        __pmd_index_size = RADIX_PMD_INDEX_SIZE;
 660        __pud_index_size = RADIX_PUD_INDEX_SIZE;
 661        __pgd_index_size = RADIX_PGD_INDEX_SIZE;
 662        __pud_cache_index = RADIX_PUD_INDEX_SIZE;
 663        __pte_table_size = RADIX_PTE_TABLE_SIZE;
 664        __pmd_table_size = RADIX_PMD_TABLE_SIZE;
 665        __pud_table_size = RADIX_PUD_TABLE_SIZE;
 666        __pgd_table_size = RADIX_PGD_TABLE_SIZE;
 667
 668        __pmd_val_bits = RADIX_PMD_VAL_BITS;
 669        __pud_val_bits = RADIX_PUD_VAL_BITS;
 670        __pgd_val_bits = RADIX_PGD_VAL_BITS;
 671
 672        __kernel_virt_start = RADIX_KERN_VIRT_START;
 673        __vmalloc_start = RADIX_VMALLOC_START;
 674        __vmalloc_end = RADIX_VMALLOC_END;
 675        __kernel_io_start = RADIX_KERN_IO_START;
 676        __kernel_io_end = RADIX_KERN_IO_END;
 677        vmemmap = (struct page *)RADIX_VMEMMAP_START;
 678        ioremap_bot = IOREMAP_BASE;
 679
 680#ifdef CONFIG_PCI
 681        pci_io_base = ISA_IO_BASE;
 682#endif
 683        __pte_frag_nr = RADIX_PTE_FRAG_NR;
 684        __pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
 685        __pmd_frag_nr = RADIX_PMD_FRAG_NR;
 686        __pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;
 687
 688        radix_init_pgtable();
 689
 690        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
 691                lpcr = mfspr(SPRN_LPCR);
 692                mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
 693                radix_init_partition_table();
 694                radix_init_amor();
 695        } else {
 696                radix_init_pseries();
 697        }
 698
 699        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 700
 701        /* Switch to the guard PID before turning on MMU */
 702        radix__switch_mmu_context(NULL, &init_mm);
 703        tlbiel_all();
 704}
 705
 706void radix__early_init_mmu_secondary(void)
 707{
 708        unsigned long lpcr;
 709        /*
 710         * update partition table control register and UPRT
 711         */
 712        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
 713                lpcr = mfspr(SPRN_LPCR);
 714                mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
 715
 716                set_ptcr_when_no_uv(__pa(partition_tb) |
 717                                    (PATB_SIZE_SHIFT - 12));
 718
 719                radix_init_amor();
 720        }
 721
 722        radix__switch_mmu_context(NULL, &init_mm);
 723        tlbiel_all();
 724}
 725
 726void radix__mmu_cleanup_all(void)
 727{
 728        unsigned long lpcr;
 729
 730        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
 731                lpcr = mfspr(SPRN_LPCR);
 732                mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
 733                set_ptcr_when_no_uv(0);
 734                powernv_set_nmmu_ptcr(0);
 735                radix__flush_tlb_all();
 736        }
 737}
 738
 739#ifdef CONFIG_MEMORY_HOTPLUG
 740static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
 741{
 742        pte_t *pte;
 743        int i;
 744
 745        for (i = 0; i < PTRS_PER_PTE; i++) {
 746                pte = pte_start + i;
 747                if (!pte_none(*pte))
 748                        return;
 749        }
 750
 751        pte_free_kernel(&init_mm, pte_start);
 752        pmd_clear(pmd);
 753}
 754
 755static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 756{
 757        pmd_t *pmd;
 758        int i;
 759
 760        for (i = 0; i < PTRS_PER_PMD; i++) {
 761                pmd = pmd_start + i;
 762                if (!pmd_none(*pmd))
 763                        return;
 764        }
 765
 766        pmd_free(&init_mm, pmd_start);
 767        pud_clear(pud);
 768}
 769
 770static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
 771{
 772        pud_t *pud;
 773        int i;
 774
 775        for (i = 0; i < PTRS_PER_PUD; i++) {
 776                pud = pud_start + i;
 777                if (!pud_none(*pud))
 778                        return;
 779        }
 780
 781        pud_free(&init_mm, pud_start);
 782        p4d_clear(p4d);
 783}
 784
 785static void remove_pte_table(pte_t *pte_start, unsigned long addr,
 786                             unsigned long end)
 787{
 788        unsigned long next;
 789        pte_t *pte;
 790
 791        pte = pte_start + pte_index(addr);
 792        for (; addr < end; addr = next, pte++) {
 793                next = (addr + PAGE_SIZE) & PAGE_MASK;
 794                if (next > end)
 795                        next = end;
 796
 797                if (!pte_present(*pte))
 798                        continue;
 799
 800                if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
 801                        /*
 802                         * The vmemmap_free() and remove_section_mapping()
 803                         * codepaths call us with aligned addresses.
 804                         */
 805                        WARN_ONCE(1, "%s: unaligned range\n", __func__);
 806                        continue;
 807                }
 808
 809                pte_clear(&init_mm, addr, pte);
 810        }
 811}
 812
 813static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
 814                             unsigned long end)
 815{
 816        unsigned long next;
 817        pte_t *pte_base;
 818        pmd_t *pmd;
 819
 820        pmd = pmd_start + pmd_index(addr);
 821        for (; addr < end; addr = next, pmd++) {
 822                next = pmd_addr_end(addr, end);
 823
 824                if (!pmd_present(*pmd))
 825                        continue;
 826
 827                if (pmd_is_leaf(*pmd)) {
 828                        if (!IS_ALIGNED(addr, PMD_SIZE) ||
 829                            !IS_ALIGNED(next, PMD_SIZE)) {
 830                                WARN_ONCE(1, "%s: unaligned range\n", __func__);
 831                                continue;
 832                        }
 833                        pte_clear(&init_mm, addr, (pte_t *)pmd);
 834                        continue;
 835                }
 836
 837                pte_base = (pte_t *)pmd_page_vaddr(*pmd);
 838                remove_pte_table(pte_base, addr, next);
 839                free_pte_table(pte_base, pmd);
 840        }
 841}
 842
 843static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
 844                             unsigned long end)
 845{
 846        unsigned long next;
 847        pmd_t *pmd_base;
 848        pud_t *pud;
 849
 850        pud = pud_start + pud_index(addr);
 851        for (; addr < end; addr = next, pud++) {
 852                next = pud_addr_end(addr, end);
 853
 854                if (!pud_present(*pud))
 855                        continue;
 856
 857                if (pud_is_leaf(*pud)) {
 858                        if (!IS_ALIGNED(addr, PUD_SIZE) ||
 859                            !IS_ALIGNED(next, PUD_SIZE)) {
 860                                WARN_ONCE(1, "%s: unaligned range\n", __func__);
 861                                continue;
 862                        }
 863                        pte_clear(&init_mm, addr, (pte_t *)pud);
 864                        continue;
 865                }
 866
 867                pmd_base = (pmd_t *)pud_page_vaddr(*pud);
 868                remove_pmd_table(pmd_base, addr, next);
 869                free_pmd_table(pmd_base, pud);
 870        }
 871}
 872
 873static void __meminit remove_pagetable(unsigned long start, unsigned long end)
 874{
 875        unsigned long addr, next;
 876        pud_t *pud_base;
 877        pgd_t *pgd;
 878        p4d_t *p4d;
 879
 880        spin_lock(&init_mm.page_table_lock);
 881
 882        for (addr = start; addr < end; addr = next) {
 883                next = pgd_addr_end(addr, end);
 884
 885                pgd = pgd_offset_k(addr);
 886                p4d = p4d_offset(pgd, addr);
 887                if (!p4d_present(*p4d))
 888                        continue;
 889
 890                if (p4d_is_leaf(*p4d)) {
 891                        if (!IS_ALIGNED(addr, P4D_SIZE) ||
 892                            !IS_ALIGNED(next, P4D_SIZE)) {
 893                                WARN_ONCE(1, "%s: unaligned range\n", __func__);
 894                                continue;
 895                        }
 896
 897                        pte_clear(&init_mm, addr, (pte_t *)pgd);
 898                        continue;
 899                }
 900
 901                pud_base = (pud_t *)p4d_page_vaddr(*p4d);
 902                remove_pud_table(pud_base, addr, next);
 903                free_pud_table(pud_base, p4d);
 904        }
 905
 906        spin_unlock(&init_mm.page_table_lock);
 907        radix__flush_tlb_kernel_range(start, end);
 908}
 909
 910int __meminit radix__create_section_mapping(unsigned long start,
 911                                            unsigned long end, int nid,
 912                                            pgprot_t prot)
 913{
 914        if (end >= RADIX_VMALLOC_START) {
 915                pr_warn("Outside the supported range\n");
 916                return -1;
 917        }
 918
 919        return create_physical_mapping(__pa(start), __pa(end),
 920                                       radix_mem_block_size, nid, prot);
 921}
 922
 923int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
 924{
 925        remove_pagetable(start, end);
 926        return 0;
 927}
 928#endif /* CONFIG_MEMORY_HOTPLUG */
 929
 930#ifdef CONFIG_SPARSEMEM_VMEMMAP
 931static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
 932                                 pgprot_t flags, unsigned int map_page_size,
 933                                 int nid)
 934{
 935        return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
 936}
 937
 938int __meminit radix__vmemmap_create_mapping(unsigned long start,
 939                                      unsigned long page_size,
 940                                      unsigned long phys)
 941{
 942        /* Create a PTE encoding */
 943        unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
 944        int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
 945        int ret;
 946
 947        if ((start + page_size) >= RADIX_VMEMMAP_END) {
 948                pr_warn("Outside the supported range\n");
 949                return -1;
 950        }
 951
 952        ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
 953        BUG_ON(ret);
 954
 955        return 0;
 956}
 957
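/*
 * Illustrative sketch (not part of the original file): a single call into
 * radix__vmemmap_create_mapping() above, roughly what the powerpc
 * vmemmap_populate() path does for each chunk of backing memory it
 * allocates. The names example_start and example_phys are hypothetical,
 * and PMD_SIZE is only an example page size (matching mmu_vmemmap_psize =
 * MMU_PAGE_2M when 2M pages are available).
 */
#if 0
static int __meminit example_map_vmemmap_chunk(unsigned long example_start,
                                               unsigned long example_phys)
{
        /* Back PMD_SIZE bytes of the vmemmap at example_start with example_phys. */
        return radix__vmemmap_create_mapping(example_start, PMD_SIZE, example_phys);
}
#endif
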
 958#ifdef CONFIG_MEMORY_HOTPLUG
 959void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
 960{
 961        remove_pagetable(start, start + page_size);
 962}
 963#endif
 964#endif
 965
 966#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 967
 968unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
 969                                  pmd_t *pmdp, unsigned long clr,
 970                                  unsigned long set)
 971{
 972        unsigned long old;
 973
 974#ifdef CONFIG_DEBUG_VM
 975        WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
 976        assert_spin_locked(pmd_lockptr(mm, pmdp));
 977#endif
 978
 979        old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
 980        trace_hugepage_update(addr, old, clr, set);
 981
 982        return old;
 983}
 984
 985pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
 986                        pmd_t *pmdp)
 987
 988{
 989        pmd_t pmd;
 990
 991        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 992        VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
 993        VM_BUG_ON(pmd_devmap(*pmdp));
 994        /*
  995         * khugepaged calls this for a normal pmd
 996         */
 997        pmd = *pmdp;
 998        pmd_clear(pmdp);
 999
1000        /*
 1001         * pmdp_collapse_flush needs to ensure that there are no parallel GUP
 1002         * walks after this call. This is needed so that we can have a stable
 1003         * page refcount when collapsing a page. We don't allow collapsing a page
 1004         * if GUP has taken a reference on it. We can ensure that by sending an IPI,
 1005         * because the GUP walk happens with IRQs disabled.
1006         */
1007        serialize_against_pte_lookup(vma->vm_mm);
1008
1009        radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
1010
1011        return pmd;
1012}
1013
1014/*
 1015 * For us pgtable_t is pte_t *. In order to save the deposited
 1016 * page table, we treat the allocated page table as a list
1017 * head. On withdraw we need to make sure we zero out the used
1018 * list_head memory area.
1019 */
1020void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1021                                 pgtable_t pgtable)
1022{
1023        struct list_head *lh = (struct list_head *) pgtable;
1024
1025        assert_spin_locked(pmd_lockptr(mm, pmdp));
1026
1027        /* FIFO */
1028        if (!pmd_huge_pte(mm, pmdp))
1029                INIT_LIST_HEAD(lh);
1030        else
1031                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1032        pmd_huge_pte(mm, pmdp) = pgtable;
1033}
1034
1035pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1036{
1037        pte_t *ptep;
1038        pgtable_t pgtable;
1039        struct list_head *lh;
1040
1041        assert_spin_locked(pmd_lockptr(mm, pmdp));
1042
1043        /* FIFO */
1044        pgtable = pmd_huge_pte(mm, pmdp);
1045        lh = (struct list_head *) pgtable;
1046        if (list_empty(lh))
1047                pmd_huge_pte(mm, pmdp) = NULL;
1048        else {
1049                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
1050                list_del(lh);
1051        }
1052        ptep = (pte_t *) pgtable;
1053        *ptep = __pte(0);
1054        ptep++;
1055        *ptep = __pte(0);
1056        return pgtable;
1057}
1058
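/*
 * Illustrative sketch (not part of the original file): how the deposit /
 * withdraw pair above is used. In practice the generic THP code drives
 * these helpers through the pgtable_trans_huge_deposit() /
 * pgtable_trans_huge_withdraw() hooks; the hypothetical helper below only
 * shows the pairing, and assumes the caller already holds the pmd lock
 * that both functions assert.
 */
#if 0
static void example_deposit_withdraw(struct mm_struct *mm, pmd_t *pmdp,
                                     pgtable_t pgtable)
{
        /* Stash the no-longer-used PTE page behind the huge PMD... */
        radix__pgtable_trans_huge_deposit(mm, pmdp, pgtable);

        /* ...then take it back, e.g. when the huge PMD is split again. */
        pgtable = radix__pgtable_trans_huge_withdraw(mm, pmdp);
        pte_free(mm, pgtable);
}
#endif
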
1059pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
1060                                     unsigned long addr, pmd_t *pmdp)
1061{
1062        pmd_t old_pmd;
1063        unsigned long old;
1064
1065        old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
1066        old_pmd = __pmd(old);
1067        return old_pmd;
1068}
1069
1070#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1071
1072void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
1073                                  pte_t entry, unsigned long address, int psize)
1074{
1075        struct mm_struct *mm = vma->vm_mm;
1076        unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
1077                                              _PAGE_RW | _PAGE_EXEC);
1078
1079        unsigned long change = pte_val(entry) ^ pte_val(*ptep);
1080        /*
 1081         * To avoid an NMMU hang while relaxing access, we need to mark
 1082         * the pte invalid in between.
1083         */
1084        if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
1085                unsigned long old_pte, new_pte;
1086
1087                old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
1088                /*
1089                 * new value of pte
1090                 */
1091                new_pte = old_pte | set;
1092                radix__flush_tlb_page_psize(mm, address, psize);
1093                __radix_pte_update(ptep, _PAGE_INVALID, new_pte);
1094        } else {
1095                __radix_pte_update(ptep, 0, set);
1096                /*
1097                 * Book3S does not require a TLB flush when relaxing access
1098                 * restrictions when the address space is not attached to a
1099                 * NMMU, because the core MMU will reload the pte after taking
 1100         * an access fault, which is defined by the architecture.
1101                 */
1102        }
1103        /* See ptesync comment in radix__set_pte_at */
1104}
1105
1106void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
1107                                    unsigned long addr, pte_t *ptep,
1108                                    pte_t old_pte, pte_t pte)
1109{
1110        struct mm_struct *mm = vma->vm_mm;
1111
1112        /*
 1113         * To avoid an NMMU hang while relaxing access we need to flush the TLB
 1114         * before we set the new value. We only need to do this for radix, because
 1115         * hash translation flushes when updating the Linux pte.
1116         */
1117        if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
1118            (atomic_read(&mm->context.copros) > 0))
1119                radix__flush_tlb_page(vma, addr);
1120
1121        set_pte_at(mm, addr, ptep, pte);
1122}
1123
1124int __init arch_ioremap_pud_supported(void)
1125{
1126        /* HPT does not cope with large pages in the vmalloc area */
1127        return radix_enabled();
1128}
1129
1130int __init arch_ioremap_pmd_supported(void)
1131{
1132        return radix_enabled();
1133}
1134
1135int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
1136{
1137        return 0;
1138}
1139
1140int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1141{
1142        pte_t *ptep = (pte_t *)pud;
1143        pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);
1144
1145        if (!radix_enabled())
1146                return 0;
1147
1148        set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
1149
1150        return 1;
1151}
1152
1153int pud_clear_huge(pud_t *pud)
1154{
1155        if (pud_huge(*pud)) {
1156                pud_clear(pud);
1157                return 1;
1158        }
1159
1160        return 0;
1161}
1162
1163int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1164{
1165        pmd_t *pmd;
1166        int i;
1167
1168        pmd = (pmd_t *)pud_page_vaddr(*pud);
1169        pud_clear(pud);
1170
1171        flush_tlb_kernel_range(addr, addr + PUD_SIZE);
1172
1173        for (i = 0; i < PTRS_PER_PMD; i++) {
1174                if (!pmd_none(pmd[i])) {
1175                        pte_t *pte;
1176                        pte = (pte_t *)pmd_page_vaddr(pmd[i]);
1177
1178                        pte_free_kernel(&init_mm, pte);
1179                }
1180        }
1181
1182        pmd_free(&init_mm, pmd);
1183
1184        return 1;
1185}
1186
1187int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1188{
1189        pte_t *ptep = (pte_t *)pmd;
1190        pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);
1191
1192        if (!radix_enabled())
1193                return 0;
1194
1195        set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
1196
1197        return 1;
1198}
1199
1200int pmd_clear_huge(pmd_t *pmd)
1201{
1202        if (pmd_huge(*pmd)) {
1203                pmd_clear(pmd);
1204                return 1;
1205        }
1206
1207        return 0;
1208}
1209
1210int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1211{
1212        pte_t *pte;
1213
1214        pte = (pte_t *)pmd_page_vaddr(*pmd);
1215        pmd_clear(pmd);
1216
1217        flush_tlb_kernel_range(addr, addr + PMD_SIZE);
1218
1219        pte_free_kernel(&init_mm, pte);
1220
1221        return 1;
1222}
1223
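/*
 * Illustrative sketch (not part of the original file): the huge-mapping
 * helpers above are normally driven by the generic ioremap/vmap code once
 * arch_ioremap_pmd_supported() returns true; the hypothetical helper below
 * only shows the set/clear pairing for a PMD-aligned mapping and skips the
 * pre-existing-page-table case that pmd_free_pte_page() handles.
 */
#if 0
static int example_huge_map_pmd(pmd_t *pmd, phys_addr_t phys, unsigned long vaddr)
{
        /* Install a PMD_SIZE leaf mapping... */
        if (!pmd_set_huge(pmd, phys, PAGE_KERNEL))
                return -EINVAL;

        /* ...and tear it down again, flushing the kernel TLB for the range. */
        if (pmd_clear_huge(pmd))
                flush_tlb_kernel_range(vaddr, vaddr + PMD_SIZE);

        return 0;
}
#endif
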
1224int __init arch_ioremap_p4d_supported(void)
1225{
1226        return 0;
1227}
1228