linux/arch/powerpc/kvm/book3s_64_mmu_radix.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>

/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
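/*
 * Levels are indexed from the bottom up: index 3 is the 13-bit top
 * (root) level, indexes 2 and 1 are 9-bit intermediate levels, and
 * index 0 is the leaf level.  For the 52-bit address space this gives
 * 13 + 9 + 9 + 9 + 12 = 52 with 9 bits at the leaf (4k pages), or
 * 13 + 9 + 9 + 5 + 16 = 52 with 5 bits at the leaf (64k pages).
 */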

int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                           struct kvmppc_pte *gpte, bool data, bool iswrite)
{
        struct kvm *kvm = vcpu->kvm;
        u32 pid;
        int ret, level, ps;
        __be64 prte, rpte;
        unsigned long ptbl;
        unsigned long root, pte, index;
        unsigned long rts, bits, offset;
        unsigned long gpa;
        unsigned long proc_tbl_size;

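        /*
         * The top two bits of the effective address select the quadrant:
         * quadrant 0 is translated with the guest's current PID and
         * quadrant 3 with PID 0 (the guest kernel's address space);
         * quadrants 1 and 2 are not valid guest effective addresses.
         */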
        /* Work out effective PID */
        switch (eaddr >> 62) {
        case 0:
                pid = vcpu->arch.pid;
                break;
        case 3:
                pid = 0;
                break;
        default:
                return -EINVAL;
        }
        proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
        if (pid * 16 >= proc_tbl_size)
                return -EINVAL;

        /* Read partition table to find root of tree for effective PID */
        ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
        ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
        if (ret)
                return ret;

        root = be64_to_cpu(prte);
        rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
                ((root & RTS2_MASK) >> RTS2_SHIFT);
        bits = root & RPDS_MASK;
        root = root & RPDB_MASK;

        /* P9 DD1 interprets RTS (radix tree size) differently */
        offset = rts + 31;
        if (cpu_has_feature(CPU_FTR_POWER9_DD1))
                offset -= 3;

        /* current implementations only support 52-bit space */
        if (offset != 52)
                return -EINVAL;

        for (level = 3; level >= 0; --level) {
                if (level && bits != p9_supported_radix_bits[level])
                        return -EINVAL;
                if (level == 0 && !(bits == 5 || bits == 9))
                        return -EINVAL;
                offset -= bits;
                index = (eaddr >> offset) & ((1UL << bits) - 1);
                /* check that low bits of page table base are zero */
                if (root & ((1UL << (bits + 3)) - 1))
                        return -EINVAL;
                ret = kvm_read_guest(kvm, root + index * 8,
                                     &rpte, sizeof(rpte));
                if (ret)
                        return ret;
                pte = __be64_to_cpu(rpte);
                if (!(pte & _PAGE_PRESENT))
                        return -ENOENT;
                if (pte & _PAGE_PTE)
                        break;
                bits = pte & 0x1f;
                root = pte & 0x0fffffffffffff00ul;
        }
        /* need a leaf at lowest level; 512GB pages not supported */
        if (level < 0 || level == 3)
                return -EINVAL;

        /* offset is now log base 2 of the page size */
        gpa = pte & 0x01fffffffffff000ul;
        if (gpa & ((1ul << offset) - 1))
                return -EINVAL;
        gpa += eaddr & ((1ul << offset) - 1);
        for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
                if (offset == mmu_psize_defs[ps].shift)
                        break;
        gpte->page_size = ps;

        gpte->eaddr = eaddr;
        gpte->raddr = gpa;

        /* Work out permissions */
        gpte->may_read = !!(pte & _PAGE_READ);
        gpte->may_write = !!(pte & _PAGE_WRITE);
        gpte->may_execute = !!(pte & _PAGE_EXEC);
        if (kvmppc_get_msr(vcpu) & MSR_PR) {
                if (pte & _PAGE_PRIVILEGED) {
                        gpte->may_read = 0;
                        gpte->may_write = 0;
                        gpte->may_execute = 0;
                }
        } else {
                if (!(pte & _PAGE_PRIVILEGED)) {
                        /* Check AMR/IAMR to see if strict mode is in force */
                        if (vcpu->arch.amr & (1ul << 62))
                                gpte->may_read = 0;
                        if (vcpu->arch.amr & (1ul << 63))
                                gpte->may_write = 0;
                        if (vcpu->arch.iamr & (1ul << 62))
                                gpte->may_execute = 0;
                }
        }

        return 0;
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_BASE_PSIZE  MMU_PAGE_64K
#else
#define MMU_BASE_PSIZE  MMU_PAGE_4K
#endif

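/*
 * Invalidate the partition-scoped TLB entry for guest real address
 * 'addr' in this guest.  The tlbie operands are RIC=0 (invalidate TLB
 * entry), PRS=0 (partition-scoped) and R=1 (radix), with the page size
 * encoded in the AP field of the effective address.
 */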
static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
                                    unsigned int pshift)
{
        int psize = MMU_BASE_PSIZE;

        if (pshift >= PMD_SHIFT)
                psize = MMU_PAGE_2M;
        addr &= ~0xfffUL;
        addr |= mmu_psize_defs[psize].ap << 5;
        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
                     : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
        asm volatile("ptesync": : :"memory");
}

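/*
 * Atomically clear the bits in 'clr' and set the bits in 'set' in the
 * PTE, returning the previous PTE value.  On POWER9 DD1, a PTE must be
 * invalidated (and the stale translation flushed) before any of its
 * attributes can be changed, so that is done first when necessary.
 */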
unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
                                      unsigned long clr, unsigned long set,
                                      unsigned long addr, unsigned int shift)
{
        unsigned long old = 0;

        if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
            pte_present(*ptep)) {
                /* have to invalidate it first */
                old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
                kvmppc_radix_tlbie_page(kvm, addr, shift);
                set |= _PAGE_PRESENT;
                old &= _PAGE_PRESENT;
        }
        return __radix_pte_update(ptep, clr, set) | old;
}

void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
                             pte_t *ptep, pte_t pte)
{
        radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}

static struct kmem_cache *kvm_pte_cache;

static pte_t *kvmppc_pte_alloc(void)
{
        return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
}

static void kvmppc_pte_free(pte_t *ptep)
{
        kmem_cache_free(kvm_pte_cache, ptep);
}

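/*
 * Install 'pte' at 'gpa' in the partition-scoped tree, either at the
 * PTE level ('level' 0) or as a 2MB leaf at the PMD level ('level' 1).
 * Any page-table pages that might be needed are allocated before
 * taking kvm->mmu_lock, then the tree is traversed again under the
 * lock; if an MMU notifier invalidation ran in the meantime, -EAGAIN
 * tells the caller to let the guest retry the access.
 */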
static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
                             unsigned int level, unsigned long mmu_seq)
{
        pgd_t *pgd;
        pud_t *pud, *new_pud = NULL;
        pmd_t *pmd, *new_pmd = NULL;
        pte_t *ptep, *new_ptep = NULL;
        unsigned long old;
        int ret;

        /* Traverse the guest's 2nd-level tree, allocate new levels needed */
        pgd = kvm->arch.pgtable + pgd_index(gpa);
        pud = NULL;
        if (pgd_present(*pgd))
                pud = pud_offset(pgd, gpa);
        else
                new_pud = pud_alloc_one(kvm->mm, gpa);

        pmd = NULL;
        if (pud && pud_present(*pud))
                pmd = pmd_offset(pud, gpa);
        else
                new_pmd = pmd_alloc_one(kvm->mm, gpa);

        if (level == 0 && !(pmd && pmd_present(*pmd)))
                new_ptep = kvmppc_pte_alloc();

        /* Check if we might have been invalidated; let the guest retry if so */
        spin_lock(&kvm->mmu_lock);
        ret = -EAGAIN;
        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;

        /* Now traverse again under the lock and change the tree */
        ret = -ENOMEM;
        if (pgd_none(*pgd)) {
                if (!new_pud)
                        goto out_unlock;
                pgd_populate(kvm->mm, pgd, new_pud);
                new_pud = NULL;
        }
        pud = pud_offset(pgd, gpa);
        if (pud_none(*pud)) {
                if (!new_pmd)
                        goto out_unlock;
                pud_populate(kvm->mm, pud, new_pmd);
                new_pmd = NULL;
        }
        pmd = pmd_offset(pud, gpa);
        if (pmd_large(*pmd)) {
                /* Someone else has instantiated a large page here; retry */
                ret = -EAGAIN;
                goto out_unlock;
        }
        if (level == 1 && !pmd_none(*pmd)) {
                /*
                 * There's a page table page here, but we wanted
                 * to install a large page.  Tell the caller and let
                 * it try installing a normal page if it wants.
                 */
                ret = -EBUSY;
                goto out_unlock;
        }
        if (level == 0) {
                if (pmd_none(*pmd)) {
                        if (!new_ptep)
                                goto out_unlock;
                        pmd_populate(kvm->mm, pmd, new_ptep);
                        new_ptep = NULL;
                }
                ptep = pte_offset_kernel(pmd, gpa);
                if (pte_present(*ptep)) {
                        /* PTE was previously valid, so invalidate it */
                        old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT,
                                                      0, gpa, 0);
                        kvmppc_radix_tlbie_page(kvm, gpa, 0);
                        if (old & _PAGE_DIRTY)
                                mark_page_dirty(kvm, gpa >> PAGE_SHIFT);
                }
                kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
        } else {
                kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
        }
        ret = 0;

 out_unlock:
        spin_unlock(&kvm->mmu_lock);
        if (new_pud)
                pud_free(kvm->mm, new_pud);
        if (new_pmd)
                pmd_free(kvm->mm, new_pmd);
        if (new_ptep)
                kvmppc_pte_free(new_ptep);
        return ret;
}

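/*
 * Handle a hypervisor data storage interrupt for a radix guest:
 * reflect genuinely bad accesses back to the guest as DSIs, hand
 * accesses outside any memslot to the MMIO emulation path, and
 * otherwise fault the host page in (via get_user_pages_fast or a
 * PFN mapping) and install a corresponding PTE in the
 * partition-scoped tree.
 */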
int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned long ea, unsigned long dsisr)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long mmu_seq, pte_size;
        unsigned long gpa, gfn, hva, pfn;
        struct kvm_memory_slot *memslot;
        struct page *page = NULL, *pages[1];
        long ret, npages, ok;
        unsigned int writing;
        struct vm_area_struct *vma;
        unsigned long flags;
        pte_t pte, *ptep;
        unsigned long pgflags;
        unsigned int shift, level;

        /* Check for unusual errors */
        if (dsisr & DSISR_UNSUPP_MMU) {
                pr_err("KVM: Got unsupported MMU fault\n");
                return -EFAULT;
        }
        if (dsisr & DSISR_BADACCESS) {
                /* Reflect to the guest as DSI */
                pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
                kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
                return RESUME_GUEST;
        }

        /* Translate the logical address and get the page */
        gpa = vcpu->arch.fault_gpa & ~0xfffUL;
        gpa &= ~0xF000000000000000ul;
        gfn = gpa >> PAGE_SHIFT;
        if (!(dsisr & DSISR_PRTABLE_FAULT))
                gpa |= ea & 0xfff;
        memslot = gfn_to_memslot(kvm, gfn);

        /* No memslot means it's an emulated MMIO region */
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
                if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
                             DSISR_SET_RC)) {
                        /*
                         * Bad address in guest page table tree, or other
                         * unusual error - reflect it to the guest as DSI.
                         */
                        kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
                        return RESUME_GUEST;
                }
                return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
                                              dsisr & DSISR_ISSTORE);
        }

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        writing = (dsisr & DSISR_ISSTORE) != 0;
        hva = gfn_to_hva_memslot(memslot, gfn);
        if (dsisr & DSISR_SET_RC) {
                /*
                 * Need to set an R or C bit in the 2nd-level tables;
                 * if the relevant bits aren't already set in the linux
                 * page tables, fall through to do the gup_fast to
                 * set them in the linux page tables too.
                 */
                ok = 0;
                pgflags = _PAGE_ACCESSED;
                if (writing)
                        pgflags |= _PAGE_DIRTY;
                local_irq_save(flags);
                ptep = find_current_mm_pte(current->mm->pgd, hva, NULL, NULL);
                if (ptep) {
                        pte = READ_ONCE(*ptep);
                        if (pte_present(pte) &&
                            (pte_val(pte) & pgflags) == pgflags)
                                ok = 1;
                }
                local_irq_restore(flags);
                if (ok) {
                        spin_lock(&kvm->mmu_lock);
                        if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
                                spin_unlock(&kvm->mmu_lock);
                                return RESUME_GUEST;
                        }
                        /*
                         * We are walking the secondary page table here.
                         * We can do this without disabling irq.
                         */
                        ptep = __find_linux_pte(kvm->arch.pgtable,
                                                gpa, NULL, &shift);
                        if (ptep && pte_present(*ptep)) {
                                kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
                                                        gpa, shift);
                                spin_unlock(&kvm->mmu_lock);
                                return RESUME_GUEST;
                        }
                        spin_unlock(&kvm->mmu_lock);
                }
        }

        ret = -EFAULT;
        pfn = 0;
        pte_size = PAGE_SIZE;
        pgflags = _PAGE_READ | _PAGE_EXEC;
        level = 0;
        npages = get_user_pages_fast(hva, 1, writing, pages);
        if (npages < 1) {
                /* Check if it's an I/O mapping */
                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, hva);
                if (vma && vma->vm_start <= hva && hva < vma->vm_end &&
                    (vma->vm_flags & VM_PFNMAP)) {
                        pfn = vma->vm_pgoff +
                                ((hva - vma->vm_start) >> PAGE_SHIFT);
                        pgflags = pgprot_val(vma->vm_page_prot);
                }
                up_read(&current->mm->mmap_sem);
                if (!pfn)
                        return -EFAULT;
        } else {
                page = pages[0];
                pfn = page_to_pfn(page);
                if (PageHuge(page)) {
                        page = compound_head(page);
                        pte_size <<= compound_order(page);
                        /* See if we can insert a 2MB large-page PTE here */
                        if (pte_size >= PMD_SIZE &&
                            (gpa & PMD_MASK & PAGE_MASK) ==
                            (hva & PMD_MASK & PAGE_MASK)) {
                                level = 1;
                                pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
                        }
                }
                /* See if we can provide write access */
                if (writing) {
                        /*
                         * We assume gup_fast has set dirty on the host PTE.
                         */
                        pgflags |= _PAGE_WRITE;
                } else {
                        local_irq_save(flags);
                        ptep = find_current_mm_pte(current->mm->pgd,
                                                   hva, NULL, NULL);
                        if (ptep && pte_write(*ptep) && pte_dirty(*ptep))
                                pgflags |= _PAGE_WRITE;
                        local_irq_restore(flags);
                }
        }

        /*
         * Compute the PTE value that we need to insert.
         */
        pgflags |= _PAGE_PRESENT | _PAGE_PTE | _PAGE_ACCESSED;
        if (pgflags & _PAGE_WRITE)
                pgflags |= _PAGE_DIRTY;
        pte = pfn_pte(pfn, __pgprot(pgflags));

        /* Allocate space in the tree and write the PTE */
        ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
        if (ret == -EBUSY) {
                /*
                 * There's already a PMD where we wanted to install a large
                 * page; for now, fall back to installing a small page.
                 */
                level = 0;
                pfn |= gfn & ((PMD_SIZE >> PAGE_SHIFT) - 1);
                pte = pfn_pte(pfn, __pgprot(pgflags));
                ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
        }
        if (ret == 0 || ret == -EAGAIN)
                ret = RESUME_GUEST;

        if (page) {
                /*
                 * We drop pages[0] here, not page, because page might
                 * have been set to the head page of a compound, but
                 * we have to drop the reference on the correct tail
                 * page to match the get inside gup().
                 */
                put_page(pages[0]);
        }
        return ret;
}

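/*
 * Mark the 2^order guest pages starting at gfn dirty in the memslot
 * dirty bitmap.  For ranges of at least BITS_PER_LONG pages, whole
 * bitmap words are set at once, which assumes the range is aligned
 * to a word boundary within the bitmap.
 */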
static void mark_pages_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             unsigned long gfn, unsigned int order)
{
        unsigned long i, limit;
        unsigned long *dp;

        if (!memslot->dirty_bitmap)
                return;
        limit = 1ul << order;
        if (limit < BITS_PER_LONG) {
                for (i = 0; i < limit; ++i)
                        mark_page_dirty(kvm, gfn + i);
                return;
        }
        /* index in words, not bits: divide the page offset by BITS_PER_LONG */
        dp = memslot->dirty_bitmap + (gfn - memslot->base_gfn) / BITS_PER_LONG;
        limit /= BITS_PER_LONG;
        for (i = 0; i < limit; ++i)
                *dp++ = ~0ul;
}

/* Called with kvm->lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                    unsigned long gfn)
{
        pte_t *ptep;
        unsigned long gpa = gfn << PAGE_SHIFT;
        unsigned int shift;
        unsigned long old;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep)) {
                old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0,
                                              gpa, shift);
                kvmppc_radix_tlbie_page(kvm, gpa, shift);
                if (old & _PAGE_DIRTY) {
                        if (!shift)
                                mark_page_dirty(kvm, gfn);
                        else
                                mark_pages_dirty(kvm, memslot,
                                                 gfn, shift - PAGE_SHIFT);
                }
        }
        return 0;
}

/* Called with kvm->lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                  unsigned long gfn)
{
        pte_t *ptep;
        unsigned long gpa = gfn << PAGE_SHIFT;
        unsigned int shift;
        int ref = 0;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
                kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
                                        gpa, shift);
                /* XXX need to flush tlb here? */
                ref = 1;
        }
        return ref;
}

/* Called with kvm->lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                       unsigned long gfn)
{
        pte_t *ptep;
        unsigned long gpa = gfn << PAGE_SHIFT;
        unsigned int shift;
        int ref = 0;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep))
                ref = 1;
        return ref;
}

/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
                                struct kvm_memory_slot *memslot, int pagenum)
{
        unsigned long gfn = memslot->base_gfn + pagenum;
        unsigned long gpa = gfn << PAGE_SHIFT;
        pte_t *ptep;
        unsigned int shift;
        int ret = 0;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
                ret = 1;
                if (shift)
                        ret = 1 << (shift - PAGE_SHIFT);
                kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
                                        gpa, shift);
                kvmppc_radix_tlbie_page(kvm, gpa, shift);
        }
        return ret;
}

long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
                        struct kvm_memory_slot *memslot, unsigned long *map)
{
        unsigned long i, j;
        unsigned long n, *p;
        int npages;

        /*
         * Radix accumulates dirty bits in the first half of the
         * memslot's dirty_bitmap area, for when pages are paged
         * out or modified by the host directly.  Pick up these
         * bits and add them to the map.
         */
        n = kvm_dirty_bitmap_bytes(memslot) / sizeof(long);
        p = memslot->dirty_bitmap;
        for (i = 0; i < n; ++i)
                map[i] |= xchg(&p[i], 0);

        for (i = 0; i < memslot->npages; i = j) {
                npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

                /*
                 * Note that if npages > 0 then i must be a multiple of npages,
                 * since huge pages are only used to back the guest at guest
                 * real addresses that are a multiple of their size.
                 * Since we have at most one PTE covering any given guest
                 * real address, if npages > 1 we can skip to i + npages.
                 */
                j = i + 1;
                if (npages)
                        for (j = i; npages; ++j, --npages)
                                __set_bit_le(j, map);
        }
        return 0;
}

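/*
 * Record the AP (actual page size) encoding for 'psize': the low bits
 * hold the log base 2 of the page size and the top 3 bits (31:29) hold
 * the AP value the hardware expects, which is the format reported to
 * userspace via KVM_PPC_GET_RMMU_INFO.
 */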
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
                                 int psize, int *indexp)
{
        if (!mmu_psize_defs[psize].shift)
                return;
        info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
                (mmu_psize_defs[psize].ap << 29);
        ++(*indexp);
}

int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
        int i;

        if (!radix_enabled())
                return -EINVAL;
        memset(info, 0, sizeof(*info));

        /* 4k page size */
        info->geometries[0].page_shift = 12;
        info->geometries[0].level_bits[0] = 9;
        for (i = 1; i < 4; ++i)
                info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
        /* 64k page size */
        info->geometries[1].page_shift = 16;
        for (i = 0; i < 4; ++i)
                info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

        i = 0;
        add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

        return 0;
}
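
/*
 * Userspace obtains this information with the KVM_PPC_GET_RMMU_INFO
 * vm ioctl.  A minimal sketch (illustrative only, assuming an open
 * vm file descriptor vm_fd):
 *
 *	struct kvm_ppc_rmmu_info info;
 *
 *	if (ioctl(vm_fd, KVM_PPC_GET_RMMU_INFO, &info) == 0)
 *		// use info.geometries[] and info.ap_encodings[]
 */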

int kvmppc_init_vm_radix(struct kvm *kvm)
{
        kvm->arch.pgtable = pgd_alloc(kvm->mm);
        if (!kvm->arch.pgtable)
                return -ENOMEM;
        return 0;
}

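/*
 * Free the partition-scoped tree: walk every level, returning PTE
 * pages to the kvm-pte cache and the upper-level tables to the mm
 * page-table allocators.
 */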
void kvmppc_free_radix(struct kvm *kvm)
{
        unsigned long ig, iu, im;
        pte_t *pte;
        pmd_t *pmd;
        pud_t *pud;
        pgd_t *pgd;

        if (!kvm->arch.pgtable)
                return;
        pgd = kvm->arch.pgtable;
        for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
                if (!pgd_present(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++pud) {
                        if (!pud_present(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
                                if (pmd_huge(*pmd)) {
                                        pmd_clear(pmd);
                                        continue;
                                }
                                if (!pmd_present(*pmd))
                                        continue;
                                pte = pte_offset_map(pmd, 0);
                                memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
                                kvmppc_pte_free(pte);
                                pmd_clear(pmd);
                        }
                        pmd_free(kvm->mm, pmd_offset(pud, 0));
                        pud_clear(pud);
                }
                pud_free(kvm->mm, pud_offset(pgd, 0));
                pgd_clear(pgd);
        }
        pgd_free(kvm->mm, kvm->arch.pgtable);
}

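/*
 * Radix PTE pages hold 2^PTE_INDEX_SIZE entries, which with a 64k
 * base page size is smaller than a full page, so they come from a
 * dedicated kmem cache rather than the page allocator.  The
 * constructor zeroes new objects, and kvmppc_free_radix clears
 * tables before returning them, so allocations always start empty.
 */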
static void pte_ctor(void *addr)
{
        memset(addr, 0, PTE_TABLE_SIZE);
}

int kvmppc_radix_init(void)
{
        unsigned long size = sizeof(void *) << PTE_INDEX_SIZE;

        kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
        if (!kvm_pte_cache)
                return -ENOMEM;
        return 0;
}

void kvmppc_radix_exit(void)
{
        kmem_cache_destroy(kvm_pte_cache);
}