linux/arch/powerpc/kvm/book3s_64_mmu_radix.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>
#include <asm/plpar_wrappers.h>

/*
 * Supported radix tree geometry, indexed by level (0 = lowest).
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };

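/*
 * Copy between the hypervisor and a guest effective address.  The top
 * two bits of the effective address select the translation "quadrant":
 * 0b01 (quadrant 1) translates with LPIDR and PIDR, 0b10 (quadrant 2,
 * used when pid == 0) with LPIDR alone.  With LPIDR/PIDR temporarily
 * switched to the guest's values, and preemption and page faults
 * disabled, an ordinary inatomic user copy reaches the guest address
 * space.
 */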
unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					      gva_t eaddr, void *to, void *from,
					      unsigned long n)
{
	int old_pid, old_lpid;
	unsigned long quadrant, ret = n;
	bool is_load = !!to;

	/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
	if (kvmhv_on_pseries())
		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
					  (to != NULL) ? __pa(to) : 0,
					  (from != NULL) ? __pa(from) : 0, n);

	if (eaddr & (0xFFFUL << 52))
		return ret;

	quadrant = 1;
	if (!pid)
		quadrant = 2;
	if (is_load)
		from = (void *) (eaddr | (quadrant << 62));
	else
		to = (void *) (eaddr | (quadrant << 62));

	preempt_disable();

	/* switch the lpid first to avoid running host with unallocated pid */
	old_lpid = mfspr(SPRN_LPID);
	if (old_lpid != lpid)
		mtspr(SPRN_LPID, lpid);
	if (quadrant == 1) {
		old_pid = mfspr(SPRN_PID);
		if (old_pid != pid)
			mtspr(SPRN_PID, pid);
	}
	isync();

	pagefault_disable();
	if (is_load)
		ret = __copy_from_user_inatomic(to, (const void __user *)from, n);
	else
		ret = __copy_to_user_inatomic((void __user *)to, from, n);
	pagefault_enable();

	/* switch the pid first to avoid running host with unallocated pid */
	if (quadrant == 1 && pid != old_pid)
		mtspr(SPRN_PID, old_pid);
	if (lpid != old_lpid)
		mtspr(SPRN_LPID, old_lpid);
	isync();

	preempt_enable();

	return ret;
}

static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					  void *to, void *from, unsigned long n)
{
	int lpid = vcpu->kvm->arch.lpid;
	int pid = vcpu->arch.pid;

	/* This would cause a data segment interrupt so don't allow the access */
	if (eaddr & (0x3FFUL << 52))
		return -EINVAL;

	/* Should we be using the nested lpid? */
	if (vcpu->arch.nested)
		lpid = vcpu->arch.nested->shadow_lpid;

	/* If accessing quadrant 3 then pid is expected to be 0 */
	if (((eaddr >> 62) & 0x3) == 0x3)
		pid = 0;

	eaddr &= ~(0xFFFUL << 52);

	return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
}

long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
				 unsigned long n)
{
	long ret;

	ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
	if (ret > 0)
		memset(to + (n - ret), 0, ret);

	return ret;
}

long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
			       unsigned long n)
{
	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
}

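/*
 * Walk one radix tree in guest memory.  "root" has the layout of the
 * first doubleword of a partition or process table entry: the radix
 * tree size (RTS, split across two fields), the root page directory
 * base (RPDB) and the root directory size (RPDS).  The effective
 * address space is RTS + 31 bits wide; each level of the walk consumes
 * "bits" index bits from the top until a leaf PTE is reached.
 */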
int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			       struct kvmppc_pte *gpte, u64 root,
			       u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret, level, ps;
	unsigned long rts, bits, offset, index;
	u64 pte, base, gpa;
	__be64 rpte;

	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
		((root & RTS2_MASK) >> RTS2_SHIFT);
	bits = root & RPDS_MASK;
	base = root & RPDB_MASK;

	offset = rts + 31;

	/* Current implementations only support 52-bit space */
	if (offset != 52)
		return -EINVAL;

	/* Walk each level of the radix tree */
	for (level = 3; level >= 0; --level) {
		u64 addr;
		/* Check for a valid size */
		if (level && bits != p9_supported_radix_bits[level])
			return -EINVAL;
		if (level == 0 && !(bits == 5 || bits == 9))
			return -EINVAL;
		offset -= bits;
		index = (eaddr >> offset) & ((1UL << bits) - 1);
		/* Check that low bits of page table base are zero */
		if (base & ((1UL << (bits + 3)) - 1))
			return -EINVAL;
		/* Read the entry from guest memory */
		addr = base + (index * sizeof(rpte));
		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
		if (ret) {
			if (pte_ret_p)
				*pte_ret_p = addr;
			return ret;
		}
		pte = __be64_to_cpu(rpte);
		if (!(pte & _PAGE_PRESENT))
			return -ENOENT;
		/* Check if a leaf entry */
		if (pte & _PAGE_PTE)
			break;
		/* Get ready to walk the next level */
		base = pte & RPDB_MASK;
		bits = pte & RPDS_MASK;
	}

	/* Need a leaf at lowest level; 512GB pages not supported */
	if (level < 0 || level == 3)
		return -EINVAL;

	/* We found a valid leaf PTE */
	/* Offset is now log base 2 of the page size */
	gpa = pte & 0x01fffffffffff000ul;
	if (gpa & ((1ul << offset) - 1))
		return -EINVAL;
	gpa |= eaddr & ((1ul << offset) - 1);
	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
		if (offset == mmu_psize_defs[ps].shift)
			break;
	gpte->page_size = ps;
	gpte->page_shift = offset;

	gpte->eaddr = eaddr;
	gpte->raddr = gpa;

	/* Work out permissions */
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);

	gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);

	if (pte_ret_p)
		*pte_ret_p = pte;

	return 0;
}

/*
 * Used to walk a partition or process table radix tree in guest memory
 * Note: We exploit the fact that a partition table and a process
 * table have the same layout, a partition-scoped page table and a
 * process-scoped page table have the same layout, and the 2nd
 * doubleword of a partition table entry has the same layout as
 * the PTCR register.
 */
int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
				     struct kvmppc_pte *gpte, u64 table,
				     int table_index, u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;
	unsigned long size, ptbl, root;
	struct prtb_entry entry;

	if ((table & PRTS_MASK) > 24)
		return -EINVAL;
	size = 1ul << ((table & PRTS_MASK) + 12);

	/* Is the table big enough to contain this entry? */
	if ((table_index * sizeof(entry)) >= size)
		return -EINVAL;

	/* Read the table to find the root of the radix tree */
	ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
	if (ret)
		return ret;

	/* Root is stored in the first double word */
	root = be64_to_cpu(entry.prtb0);

	return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
}

int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	u32 pid;
	u64 pte;
	int ret;

	/* Work out effective PID */
	switch (eaddr >> 62) {
	case 0:
		pid = vcpu->arch.pid;
		break;
	case 3:
		pid = 0;
		break;
	default:
		return -EINVAL;
	}

	ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
				vcpu->kvm->arch.process_table, pid, &pte);
	if (ret)
		return ret;

	/* Check privilege (applies only to process scoped translations) */
	if (kvmppc_get_msr(vcpu) & MSR_PR) {
		if (pte & _PAGE_PRIVILEGED) {
			gpte->may_read = 0;
			gpte->may_write = 0;
			gpte->may_execute = 0;
		}
	} else {
		if (!(pte & _PAGE_PRIVILEGED)) {
			/* Check AMR/IAMR to see if strict mode is in force */
			if (vcpu->arch.amr & (1ul << 62))
				gpte->may_read = 0;
			if (vcpu->arch.amr & (1ul << 63))
				gpte->may_write = 0;
			if (vcpu->arch.iamr & (1ul << 62))
				gpte->may_execute = 0;
		}
	}

	return 0;
}

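/*
 * Invalidate the partition-scoped TLB entry for a guest real address.
 * On bare-metal HV this is a direct flush via
 * radix__flush_tlb_lpid_page(); when running as a nested hypervisor on
 * pseries, the host does the invalidation for us via the
 * H_TLB_INVALIDATE hcall, or H_RPT_INVALIDATE where the firmware
 * supports it.
 */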
void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
			     unsigned int pshift, unsigned int lpid)
{
	unsigned long psize = PAGE_SIZE;
	int psi;
	long rc;
	unsigned long rb;

	if (pshift)
		psize = 1UL << pshift;
	else
		pshift = PAGE_SHIFT;

	addr &= ~(psize - 1);

	if (!kvmhv_on_pseries()) {
		radix__flush_tlb_lpid_page(lpid, addr, psize);
		return;
	}

	psi = shift_to_mmu_psize(pshift);

	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) {
		rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
					lpid, rb);
	} else {
		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
					    H_RPTI_TYPE_NESTED |
					    H_RPTI_TYPE_TLB,
					    psize_to_rpti_pgsize(psi),
					    addr, addr + psize);
	}

	if (rc)
		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}

static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_pwc_lpid(lpid);
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
					lpid, TLBIEL_INVAL_SET_LPID);
	else
		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
					    H_RPTI_TYPE_NESTED |
					    H_RPTI_TYPE_PWC, H_RPTI_PAGE_ALL,
					    0, -1UL);
	if (rc)
		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}

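/*
 * Update a partition-scoped PTE with an atomic read-modify-write,
 * returning the old PTE value.  The addr and shift arguments are
 * currently unused.
 */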
static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
				      unsigned long clr, unsigned long set,
				      unsigned long addr, unsigned int shift)
{
	return __radix_pte_update(ptep, clr, set);
}

static void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}

static struct kmem_cache *kvm_pte_cache;
static struct kmem_cache *kvm_pmd_cache;

static pte_t *kvmppc_pte_alloc(void)
{
	pte_t *pte;

	pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
	/* pmd_populate() will only reference __pa(pte). */
	kmemleak_ignore(pte);

	return pte;
}

static void kvmppc_pte_free(pte_t *ptep)
{
	kmem_cache_free(kvm_pte_cache, ptep);
}

static pmd_t *kvmppc_pmd_alloc(void)
{
	pmd_t *pmd;

	pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
	/* pud_populate() will only reference __pa(pmd). */
	kmemleak_ignore(pmd);

	return pmd;
}

static void kvmppc_pmd_free(pmd_t *pmdp)
{
	kmem_cache_free(kvm_pmd_cache, pmdp);
}

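/*
 * Clear a partition-scoped PTE and invalidate the TLB entry for it.
 * For L1 guest entries (lpid == kvm->arch.lpid) this also updates the
 * huge-page counters, removes any nested rmap entries covering the
 * range, and transfers the dirty bit to the memslot dirty bitmap.
 */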
/* Called with kvm->mmu_lock held */
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
		      unsigned int shift,
		      const struct kvm_memory_slot *memslot,
		      unsigned int lpid)

{
	unsigned long old;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	unsigned long page_size = PAGE_SIZE;
	unsigned long hpa;

	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);

	/* The following only applies to L1 entries */
	if (lpid != kvm->arch.lpid)
		return;

	if (!memslot) {
		memslot = gfn_to_memslot(kvm, gfn);
		if (!memslot)
			return;
	}
	if (shift) { /* 1GB or 2MB page */
		page_size = 1ul << shift;
		if (shift == PMD_SHIFT)
			kvm->stat.num_2M_pages--;
		else if (shift == PUD_SHIFT)
			kvm->stat.num_1G_pages--;
	}

	gpa &= ~(page_size - 1);
	hpa = old & PTE_RPN_MASK;
	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);

	if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, page_size);
}

/*
 * kvmppc_free_p?d are used to free existing page tables, and recursively
 * descend and clear and free children.
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of page fault path
 * (full == false), valid ptes are generally not expected; however, there
 * is one situation where they arise, which is when dirty page logging is
 * turned off for a memslot while the VM is running.  The new memslot
 * becomes visible to page faults before the memslot commit function
 * gets to flush the memslot, which can lead to a 2MB page mapping being
 * installed for a guest physical address where there are already 64kB
 * (or 4kB) mappings (of sub-pages of the same 2MB page).
 */
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
				  unsigned int lpid)
{
	if (full) {
		memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
	} else {
		pte_t *p = pte;
		unsigned long it;

		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
			if (pte_val(*p) == 0)
				continue;
			kvmppc_unmap_pte(kvm, p,
					 pte_pfn(*p) << PAGE_SHIFT,
					 PAGE_SHIFT, NULL, lpid);
		}
	}

	kvmppc_pte_free(pte);
}

static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
				  unsigned int lpid)
{
	unsigned long im;
	pmd_t *p = pmd;

	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
		if (!pmd_present(*p))
			continue;
		if (pmd_is_leaf(*p)) {
			if (full) {
				pmd_clear(p);
			} else {
				WARN_ON_ONCE(1);
				kvmppc_unmap_pte(kvm, (pte_t *)p,
					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
					 PMD_SHIFT, NULL, lpid);
			}
		} else {
			pte_t *pte;

			pte = pte_offset_map(p, 0);
			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
			pmd_clear(p);
		}
	}
	kvmppc_pmd_free(pmd);
}

static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
				  unsigned int lpid)
{
	unsigned long iu;
	pud_t *p = pud;

	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
		if (!pud_present(*p))
			continue;
		if (pud_is_leaf(*p)) {
			pud_clear(p);
		} else {
			pmd_t *pmd;

			pmd = pmd_offset(p, 0);
			kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
			pud_clear(p);
		}
	}
	pud_free(kvm->mm, pud);
}

void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
{
	unsigned long ig;

	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
		p4d_t *p4d = p4d_offset(pgd, 0);
		pud_t *pud;

		if (!p4d_present(*p4d))
			continue;
		pud = pud_offset(p4d, 0);
		kvmppc_unmap_free_pud(kvm, pud, lpid);
		p4d_clear(p4d);
	}
}

void kvmppc_free_radix(struct kvm *kvm)
{
	if (kvm->arch.pgtable) {
		kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
					  kvm->arch.lpid);
		pgd_free(kvm->mm, kvm->arch.pgtable);
		kvm->arch.pgtable = NULL;
	}
}

static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
					unsigned long gpa, unsigned int lpid)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);

	/*
	 * Clearing the pmd entry then flushing the PWC ensures that the pte
	 * page will no longer be cached by the MMU, so it can be freed
	 * without flushing the PWC again.
	 */
	pmd_clear(pmd);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pte(kvm, pte, false, lpid);
}

static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
					unsigned long gpa, unsigned int lpid)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	/*
	 * Clearing the pud entry then flushing the PWC ensures that the pmd
	 * page and any children pte pages will no longer be cached by the MMU,
	 * so they can be freed without flushing the PWC again.
	 */
	pud_clear(pud);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
}

/*
 * A number of bits may differ between faults on the same partition
 * scope entry: the RC bits change in the course of cleaning and aging,
 * and the write bit can change, either because the access was upgraded
 * or because a read fault raced with a write fault that set those bits
 * first.
 */
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))

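/*
 * Insert a PTE into the partition-scoped (2nd-level) tree.  level is 0
 * for a PAGE_SIZE PTE, 1 for a 2MB PMD leaf and 2 for a 1GB PUD leaf.
 * Any new page-table pages that might be needed are allocated before
 * taking kvm->mmu_lock, and freed on the way out if they went unused;
 * mmu_notifier_retry() is checked under the lock so that a concurrent
 * invalidation makes the caller retry (-EAGAIN).
 */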
int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
		      unsigned long gpa, unsigned int level,
		      unsigned long mmu_seq, unsigned int lpid,
		      unsigned long *rmapp, struct rmap_nested **n_rmap)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud, *new_pud = NULL;
	pmd_t *pmd, *new_pmd = NULL;
	pte_t *ptep, *new_ptep = NULL;
	int ret;

	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
	pgd = pgtable + pgd_index(gpa);
	p4d = p4d_offset(pgd, gpa);

	pud = NULL;
	if (p4d_present(*p4d))
		pud = pud_offset(p4d, gpa);
	else
		new_pud = pud_alloc_one(kvm->mm, gpa);

	pmd = NULL;
	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
		pmd = pmd_offset(pud, gpa);
	else if (level <= 1)
		new_pmd = kvmppc_pmd_alloc();

	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
		new_ptep = kvmppc_pte_alloc();

	/* Check if we might have been invalidated; let the guest retry if so */
	spin_lock(&kvm->mmu_lock);
	ret = -EAGAIN;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Now traverse again under the lock and change the tree */
	ret = -ENOMEM;
	if (p4d_none(*p4d)) {
		if (!new_pud)
			goto out_unlock;
		p4d_populate(kvm->mm, p4d, new_pud);
		new_pud = NULL;
	}
	pud = pud_offset(p4d, gpa);
	if (pud_is_leaf(*pud)) {
		unsigned long hgpa = gpa & PUD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 2) {
			if (pud_raw(*pud) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 1GB page here already, add our extra bits */
			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
					      0, pte_val(pte), hgpa, PUD_SHIFT);
			ret = 0;
			goto out_unlock;
		}
		/*
		 * If we raced with another CPU which has just put
		 * a 1GB pte in after we saw a pmd page, try again.
		 */
		if (!new_pmd) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 1GB page here already, remove it */
		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
				 lpid);
	}
	if (level == 2) {
		if (!pud_none(*pud)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pud_none(*pud)) {
		if (!new_pmd)
			goto out_unlock;
		pud_populate(kvm->mm, pud, new_pmd);
		new_pmd = NULL;
	}
	pmd = pmd_offset(pud, gpa);
	if (pmd_is_leaf(*pmd)) {
		unsigned long lgpa = gpa & PMD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 1) {
			if (pmd_raw(*pmd) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 2MB page here already, add our extra bits */
			WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
					0, pte_val(pte), lgpa, PMD_SHIFT);
			ret = 0;
			goto out_unlock;
		}

		/*
		 * If we raced with another CPU which has just put
		 * a 2MB pte in after we saw a pte page, try again.
		 */
		if (!new_ptep) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 2MB page here already, remove it */
		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
				 lpid);
	}
	if (level == 1) {
		if (!pmd_none(*pmd)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pmd_none(*pmd)) {
		if (!new_ptep)
			goto out_unlock;
		pmd_populate(kvm->mm, pmd, new_ptep);
		new_ptep = NULL;
	}
	ptep = pte_offset_kernel(pmd, gpa);
	if (pte_present(*ptep)) {
		/* Check if someone else set the same thing */
		if (pte_raw(*ptep) == pte_raw(pte)) {
			ret = 0;
			goto out_unlock;
		}
		/* Valid page here already, add our extra bits */
		WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
		ret = 0;
		goto out_unlock;
	}
	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
	if (rmapp && n_rmap)
		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
	ret = 0;

 out_unlock:
	spin_unlock(&kvm->mmu_lock);
	if (new_pud)
		pud_free(kvm->mm, new_pud);
	if (new_pmd)
		kvmppc_pmd_free(new_pmd);
	if (new_ptep)
		kvmppc_pte_free(new_ptep);
	return ret;
}

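/*
 * Set the R (reference) and, for a write, C (change) bit in the
 * partition-scoped PTE covering gpa, doing no more than the hardware
 * itself would.  Returns false if there is no present PTE, or if a
 * write was requested on a read-only mapping, so the caller falls back
 * to the full fault path.
 */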
bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
			     unsigned long gpa, unsigned int lpid)
{
	unsigned long pgflags;
	unsigned int shift;
	pte_t *ptep;

	/*
	 * Need to set an R or C bit in the 2nd-level tables;
	 * since we are just helping out the hardware here,
	 * it is sufficient to do what the hardware does.
	 */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;

	if (nested)
		ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	else
		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);

	if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
		return true;
	}
	return false;
}

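/*
 * Fault in the host page backing gpa, derive the partition-scoped PTE
 * from the host Linux PTE (so we inherit the page size and attribute
 * bits), and insert it with kvmppc_create_pte().  Returns RESUME_GUEST
 * if the host PTE vanished under us (e.g. a THP collapse), so the
 * guest simply retries the access.
 */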
int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				   unsigned long gpa,
				   struct kvm_memory_slot *memslot,
				   bool writing, bool kvm_ro,
				   pte_t *inserted_pte, unsigned int *levelp)
{
	struct kvm *kvm = vcpu->kvm;
	struct page *page = NULL;
	unsigned long mmu_seq;
	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
	bool upgrade_write = false;
	bool *upgrade_p = &upgrade_write;
	pte_t pte, *ptep;
	unsigned int shift, level;
	int ret;
	bool large_enable;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/*
	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
	 * do it with !atomic && !async, which is how we call it.
	 * We always ask for write permission since the common case
	 * is that the page is writable.
	 */
	hva = gfn_to_hva_memslot(memslot, gfn);
	if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
		upgrade_write = true;
	} else {
		unsigned long pfn;

		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
					   writing, upgrade_p, NULL);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
		page = NULL;
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageReserved(page))
				page = NULL;
		}
	}

	/*
	 * Read the PTE from the process' radix tree and use that
	 * so we get the shift and attribute bits.
	 */
	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
	pte = __pte(0);
	if (ptep)
		pte = READ_ONCE(*ptep);
	spin_unlock(&kvm->mmu_lock);
	/*
	 * If the PTE disappeared temporarily due to a THP
	 * collapse, just return and let the guest try again.
	 */
	if (!pte_present(pte)) {
		if (page)
			put_page(page);
		return RESUME_GUEST;
	}

	/* If we're logging dirty pages, always map single pages */
	large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);

	/* Get pte level from shift/size */
	if (large_enable && shift == PUD_SHIFT &&
	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
	    (hva & (PUD_SIZE - PAGE_SIZE))) {
		level = 2;
	} else if (large_enable && shift == PMD_SHIFT &&
		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
		   (hva & (PMD_SIZE - PAGE_SIZE))) {
		level = 1;
	} else {
		level = 0;
		if (shift > PAGE_SHIFT) {
			/*
			 * If the pte maps more than one page, bring over
			 * bits from the virtual address to get the real
			 * address of the specific single page we want.
			 */
			unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
			pte = __pte(pte_val(pte) | (hva & rpnmask));
		}
	}

	pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
	if (writing || upgrade_write) {
		if (pte_val(pte) & _PAGE_WRITE)
			pte = __pte(pte_val(pte) | _PAGE_DIRTY);
	} else {
		pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
	}

	/* Allocate space in the tree and write the PTE */
	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
				mmu_seq, kvm->arch.lpid, NULL, NULL);
	if (inserted_pte)
		*inserted_pte = pte;
	if (levelp)
		*levelp = level;

	if (page) {
		if (!ret && (pte_val(pte) & _PAGE_WRITE))
			set_page_dirty_lock(page);
		put_page(page);
	}

	/* Increment number of large pages if we (successfully) inserted one */
	if (!ret) {
		if (level == 1)
			kvm->stat.num_2M_pages++;
		else if (level == 2)
			kvm->stat.num_1G_pages++;
	}

	return ret;
}

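/*
 * Top-level handler for a radix HV page fault: reflect bad accesses to
 * the guest as DSIs, forward faults for secure guests to the
 * ultravisor, emulate MMIO for addresses with no memslot, fix up R/C
 * bits where that is all that is needed, and otherwise instantiate a
 * mapping with kvmppc_book3s_instantiate_page().
 */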
int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;

	/* Check for unusual errors */
	if (dsisr & DSISR_UNSUPP_MMU) {
		pr_err("KVM: Got unsupported MMU fault\n");
		return -EFAULT;
	}
	if (dsisr & DSISR_BADACCESS) {
		/* Reflect to the guest as DSI */
		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
		return RESUME_GUEST;
	}

	/* Translate the logical address */
	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
	gpa &= ~0xF000000000000000ul;
	gfn = gpa >> PAGE_SHIFT;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		gpa |= ea & 0xfff;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return kvmppc_send_page_to_uv(kvm, gfn);

	/* Get the corresponding memslot */
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
			     DSISR_SET_RC)) {
			/*
			 * Bad address in guest page table tree, or other
			 * unusual error - reflect it to the guest as DSI.
			 */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}

	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
						       DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		spin_lock(&kvm->mmu_lock);
		if (kvmppc_hv_handle_set_rc(kvm, false, writing,
					    gpa, kvm->arch.lpid))
			dsisr &= ~DSISR_SET_RC;
		spin_unlock(&kvm->mmu_lock);

		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT | DSISR_SET_RC)))
			return RESUME_GUEST;
	}

	/* Try to insert a pte */
	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
					     kvm_ro, NULL, NULL);

	if (ret == 0 || ret == -EAGAIN)
		ret = RESUME_GUEST;
	return ret;
}

/* Called with kvm->mmu_lock held */
void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		     unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
		return;
	}

	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
				 kvm->arch.lpid);
}

/* Called with kvm->mmu_lock held */
bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		   unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	bool ref = false;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					      gpa, shift);
		/* XXX need to flush tlb here? */
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		ref = true;
	}
	return ref;
}

/* Called with kvm->mmu_lock held */
bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn)

{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	bool ref = false;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep))
		ref = true;
	return ref;
}

/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
				struct kvm_memory_slot *memslot, int pagenum)
{
	unsigned long gfn = memslot->base_gfn + pagenum;
	unsigned long gpa = gfn << PAGE_SHIFT;
	pte_t *ptep, pte;
	unsigned int shift;
	int ret = 0;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ret;

	/*
	 * For performance reasons we don't hold kvm->mmu_lock while walking the
	 * partition scoped table.
	 */
	ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
	if (!ptep)
		return 0;

	pte = READ_ONCE(*ptep);
	if (pte_present(pte) && pte_dirty(pte)) {
		spin_lock(&kvm->mmu_lock);
		/*
		 * Recheck the pte now that we hold the lock
		 */
		if (pte_val(pte) != pte_val(*ptep)) {
			/*
			 * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
			 * only find PAGE_SIZE pte entries here. We can continue
			 * to use the pte addr returned by above page table
			 * walk.
			 */
			if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
				spin_unlock(&kvm->mmu_lock);
				return 0;
			}
		}

		ret = 1;
		VM_BUG_ON(shift);
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					      gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		spin_unlock(&kvm->mmu_lock);
	}
	return ret;
}

long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	int npages;

	for (i = 0; i < memslot->npages; i = j) {
		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since huge pages are only used to back the guest at guest
		 * real addresses that are a multiple of their size.
		 * Since we have at most one PTE covering any given guest
		 * real address, if npages > 1 we can skip to i + npages.
		 */
		j = i + 1;
		if (npages) {
			set_dirty_bits(map, i, npages);
			j = i + npages;
		}
	}
	return 0;
}

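/*
 * Drop every partition-scoped mapping in the memslot's range, e.g.
 * when the memslot is being removed or its attributes change.  Bumping
 * mmu_notifier_seq at the end makes any page fault that looked up the
 * memslot earlier retry rather than install a stale PTE.
 */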
void kvmppc_radix_flush_memslot(struct kvm *kvm,
				const struct kvm_memory_slot *memslot)
{
	unsigned long n;
	pte_t *ptep;
	unsigned long gpa;
	unsigned int shift;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
		kvmppc_uvmem_drop_pages(memslot, kvm, true);

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return;

	gpa = memslot->base_gfn << PAGE_SHIFT;
	spin_lock(&kvm->mmu_lock);
	for (n = memslot->npages; n; --n) {
		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
		if (ptep && pte_present(*ptep))
			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
					 kvm->arch.lpid);
		gpa += PAGE_SIZE;
	}
	/*
	 * Increase the mmu notifier sequence number to prevent any page
	 * fault that read the memslot earlier from writing a PTE.
	 */
	kvm->mmu_notifier_seq++;
	spin_unlock(&kvm->mmu_lock);
}

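/*
 * Each AP (actual page size) encoding reported to userspace packs the
 * page shift into the low bits and the hardware AP field at bit 29,
 * matching the ap_encodings format of struct kvm_ppc_rmmu_info.
 */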
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
				 int psize, int *indexp)
{
	if (!mmu_psize_defs[psize].shift)
		return;
	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
		(mmu_psize_defs[psize].ap << 29);
	++(*indexp);
}

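/*
 * Report the supported radix geometries (4k and 64k base page sizes)
 * and AP encodings to userspace; this backs the KVM_PPC_GET_RMMU_INFO
 * ioctl.
 */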
int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
	int i;

	if (!radix_enabled())
		return -EINVAL;
	memset(info, 0, sizeof(*info));

	/* 4k page size */
	info->geometries[0].page_shift = 12;
	info->geometries[0].level_bits[0] = 9;
	for (i = 1; i < 4; ++i)
		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
	/* 64k page size */
	info->geometries[1].page_shift = 16;
	for (i = 0; i < 4; ++i)
		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

	i = 0;
	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

	return 0;
}

int kvmppc_init_vm_radix(struct kvm *kvm)
{
	kvm->arch.pgtable = pgd_alloc(kvm->mm);
	if (!kvm->arch.pgtable)
		return -ENOMEM;
	return 0;
}

static void pte_ctor(void *addr)
{
	memset(addr, 0, RADIX_PTE_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}

struct debugfs_radix_state {
	struct kvm	*kvm;
	struct mutex	mutex;
	unsigned long	gpa;
	int		lpid;
	int		chars_left;
	int		buf_index;
	char		buf[128];
	u8		hdr;
};

static int debugfs_radix_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;
	struct debugfs_radix_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(kvm);
	p->kvm = kvm;
	mutex_init(&p->mutex);
	file->private_data = p;

	return nonseekable_open(inode, file);
}

static int debugfs_radix_release(struct inode *inode, struct file *file)
{
	struct debugfs_radix_state *p = file->private_data;

	kvm_put_kvm(p->kvm);
	kfree(p);
	return 0;
}

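/*
 * Dump the partition-scoped page tables to userspace, one line per
 * valid leaf PTE in the form " gpa: pte shift", walking the L1 table
 * (lpid 0) first and then each nested guest's shadow table, with a
 * "Nested LPID %d" header between them.  State is carried across reads
 * in the debugfs_radix_state so large dumps can span many read() calls.
 */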
static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct debugfs_radix_state *p = file->private_data;
	ssize_t ret, r;
	unsigned long n;
	struct kvm *kvm;
	unsigned long gpa;
	pgd_t *pgt;
	struct kvm_nested_guest *nested;
	pgd_t *pgdp;
	p4d_t p4d, *p4dp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ptep;
	int shift;
	unsigned long pte;

	kvm = p->kvm;
	if (!kvm_is_radix(kvm))
		return 0;

	ret = mutex_lock_interruptible(&p->mutex);
	if (ret)
		return ret;

	if (p->chars_left) {
		n = p->chars_left;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf + p->buf_index, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index += n;
		buf += n;
		len -= n;
		ret = n;
		if (r) {
			if (!n)
				ret = -EFAULT;
			goto out;
		}
	}

	gpa = p->gpa;
	nested = NULL;
	pgt = NULL;
	while (len != 0 && p->lpid >= 0) {
		if (gpa >= RADIX_PGTABLE_RANGE) {
			gpa = 0;
			pgt = NULL;
			if (nested) {
				kvmhv_put_nested(nested);
				nested = NULL;
			}
			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
			p->hdr = 0;
			if (p->lpid < 0)
				break;
		}
		if (!pgt) {
			if (p->lpid == 0) {
				pgt = kvm->arch.pgtable;
			} else {
				nested = kvmhv_get_nested(kvm, p->lpid, false);
				if (!nested) {
					gpa = RADIX_PGTABLE_RANGE;
					continue;
				}
				pgt = nested->shadow_pgtable;
			}
		}
		n = 0;
		if (!p->hdr) {
			if (p->lpid > 0)
				n = scnprintf(p->buf, sizeof(p->buf),
					      "\nNested LPID %d: ", p->lpid);
			n += scnprintf(p->buf + n, sizeof(p->buf) - n,
				      "pgdir: %lx\n", (unsigned long)pgt);
			p->hdr = 1;
			goto copy;
		}

		pgdp = pgt + pgd_index(gpa);
		p4dp = p4d_offset(pgdp, gpa);
		p4d = READ_ONCE(*p4dp);
		if (!(p4d_val(p4d) & _PAGE_PRESENT)) {
			gpa = (gpa & P4D_MASK) + P4D_SIZE;
			continue;
		}

		pudp = pud_offset(&p4d, gpa);
		pud = READ_ONCE(*pudp);
		if (!(pud_val(pud) & _PAGE_PRESENT)) {
			gpa = (gpa & PUD_MASK) + PUD_SIZE;
			continue;
		}
		if (pud_val(pud) & _PAGE_PTE) {
			pte = pud_val(pud);
			shift = PUD_SHIFT;
			goto leaf;
		}

		pmdp = pmd_offset(&pud, gpa);
		pmd = READ_ONCE(*pmdp);
		if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
			gpa = (gpa & PMD_MASK) + PMD_SIZE;
			continue;
		}
		if (pmd_val(pmd) & _PAGE_PTE) {
			pte = pmd_val(pmd);
			shift = PMD_SHIFT;
			goto leaf;
		}

		ptep = pte_offset_kernel(&pmd, gpa);
		pte = pte_val(READ_ONCE(*ptep));
		if (!(pte & _PAGE_PRESENT)) {
			gpa += PAGE_SIZE;
			continue;
		}
		shift = PAGE_SHIFT;
	leaf:
		n = scnprintf(p->buf, sizeof(p->buf),
			      " %lx: %lx %d\n", gpa, pte, shift);
		gpa += 1ul << shift;
	copy:
		p->chars_left = n;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index = n;
		buf += n;
		len -= n;
		ret += n;
		if (r) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
	}
	p->gpa = gpa;
	if (nested)
		kvmhv_put_nested(nested);

 out:
	mutex_unlock(&p->mutex);
	return ret;
}

static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
			   size_t len, loff_t *ppos)
{
	return -EACCES;
}

static const struct file_operations debugfs_radix_fops = {
	.owner	 = THIS_MODULE,
	.open	 = debugfs_radix_open,
	.release = debugfs_radix_release,
	.read	 = debugfs_radix_read,
	.write	 = debugfs_radix_write,
	.llseek	 = generic_file_llseek,
};

void kvmhv_radix_debugfs_init(struct kvm *kvm)
{
	debugfs_create_file("radix", 0400, kvm->arch.debugfs_dir, kvm,
			    &debugfs_radix_fops);
}

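/*
 * The PTE and PMD pages of the partition-scoped tree come from
 * dedicated slab caches created with align == size, so each table is
 * naturally aligned to its own size as the radix hardware requires
 * (the tree walk above rejects a base with any of the low size bits
 * set).
 */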
int kvmppc_radix_init(void)
{
	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;

	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
	if (!kvm_pte_cache)
		return -ENOMEM;

	size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;

	kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
	if (!kvm_pmd_cache) {
		kmem_cache_destroy(kvm_pte_cache);
		return -ENOMEM;
	}

	return 0;
}

void kvmppc_radix_exit(void)
{
	kmem_cache_destroy(kvm_pte_cache);
	kmem_cache_destroy(kvm_pmd_cache);
}