linux/arch/powerpc/include/asm/kvm_book3s_64.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h>
#include <asm/ppc-opcode.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
        return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
        return false;
}
#endif

/*
 * Structure for a nested guest, that is, for a guest that is managed by
 * one of our guests.
 */
struct kvm_nested_guest {
        struct kvm *l1_host;            /* L1 VM that owns this nested guest */
        int l1_lpid;                    /* lpid L1 guest thinks this guest is */
        int shadow_lpid;                /* real lpid of this nested guest */
        pgd_t *shadow_pgtable;          /* our page table for this guest */
        u64 l1_gr_to_hr;                /* L1's addr of part'n-scoped table */
        u64 process_table;              /* process table entry for this guest */
        u64 hfscr;                      /* HFSCR that the L1 requested for this nested guest */
        long refcnt;                    /* number of pointers to this struct */
        struct mutex tlb_lock;          /* serialize page faults and tlbies */
        struct kvm_nested_guest *next;
        cpumask_t need_tlb_flush;
        cpumask_t cpu_in_guest;
        short prev_cpu[NR_CPUS];
        u8 radix;                       /* is this nested guest radix */
};

/*
 * We define a nested rmap entry as a single 64-bit quantity
 * 0xFFF0000000000000   12-bit lpid field
 * 0x000FFFFFFFFFF000   40-bit guest 4k page frame number
 * 0x0000000000000001   1-bit  single entry flag
 */
#define RMAP_NESTED_LPID_MASK           0xFFF0000000000000UL
#define RMAP_NESTED_LPID_SHIFT          (52)
#define RMAP_NESTED_GPA_MASK            0x000FFFFFFFFFF000UL
#define RMAP_NESTED_IS_SINGLE_ENTRY     0x0000000000000001UL
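
/*
 * e.g. (illustrative only, not helpers this file provides) packing and
 * unpacking a nested rmap entry with the masks above:
 *
 *      u64 rmap = ((u64)lpid << RMAP_NESTED_LPID_SHIFT) |
 *                 (gpa & RMAP_NESTED_GPA_MASK);
 *      int lpid = (rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
 *      u64 gpa  = rmap & RMAP_NESTED_GPA_MASK;
 */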

/* Structure for a nested guest rmap entry */
struct rmap_nested {
        struct llist_node list;
        u64 rmap;
};

/*
 * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
 *                           safe against removal of the list entry or NULL list
 * @pos:        a (struct rmap_nested *) to use as a loop cursor
 * @node:       pointer to the first entry
 *              NOTE: this can be NULL
 * @rmapp:      an (unsigned long *) in which to return the rmap entries on each
 *              iteration
 *              NOTE: this must point to already allocated memory
 *
 * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
 * rmap entry in the memslot. The list is always terminated by a "single entry"
 * stored in the list element of the final entry of the llist. If there is ONLY
 * a single entry then this is itself in the rmap entry of the memslot, not a
 * llist head pointer.
 *
 * Note that the iterator below assumes that a nested rmap entry is always
 * non-zero.  This is true for our usage because the LPID field is always
 * non-zero (zero is reserved for the host).
 *
 * This should be used to iterate over the list of rmap_nested entries with
 * processing done on the u64 rmap value given by each iteration. This is safe
 * against removal of list entries and it is always safe to call free on (pos).
 *
 * e.g.
 * struct rmap_nested *cursor;
 * struct llist_node *first;
 * unsigned long rmap;
 * for_each_nest_rmap_safe(cursor, first, &rmap) {
 *      do_something(rmap);
 *      free(cursor);
 * }
 */
#define for_each_nest_rmap_safe(pos, node, rmapp)                              \
        for ((pos) = llist_entry((node), typeof(*(pos)), list);                \
             (node) &&                                                         \
             (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?     \
                          ((u64) (node)) : ((pos)->rmap))) &&                  \
             (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?      \
                         ((struct llist_node *) ((pos) = NULL)) :              \
                         (pos)->list.next)), true);                            \
             (pos) = llist_entry((node), typeof(*(pos)), list))

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
                                          bool create);
void kvmhv_put_nested(struct kvm_nested_guest *gp);
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);
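
/*
 * e.g. (illustrative sketch, error handling elided): looking up a nested
 * guest by the lpid that L1 uses for it, then dropping the reference:
 *
 *      struct kvm_nested_guest *gp;
 *
 *      gp = kvmhv_get_nested(kvm, l1_lpid, false);
 *      if (gp) {
 *              ... use gp->shadow_lpid, gp->shadow_pgtable, ...
 *              kvmhv_put_nested(gp);
 *      }
 */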

/* Encoding of first parameter for H_TLB_INVALIDATE */
#define H_TLBIE_P1_ENC(ric, prs, r)     (___PPC_RIC(ric) | ___PPC_PRS(prs) | \
                                         ___PPC_R(r))
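
/*
 * e.g. H_TLBIE_P1_ENC(0, 1, 1) ORs the RIC, PRS and R operand fields of a
 * tlbie instruction into one value; this example assumes the ISA 3.0
 * meanings of the fields (RIC=0 invalidates TLB entries, PRS=1 selects
 * process-scoped, R=1 means radix).
 */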

/* The Power architecture requires the HPT to be at least 256kiB and at most 64TiB */
#define PPC_MIN_HPT_ORDER       18
#define PPC_MAX_HPT_ORDER       46
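
/*
 * An HPT of order N occupies 2^N bytes: order 18 is 256kiB, order 46 is
 * 64TiB, and the 16MB default below is order 24.  Each HPTE is 16 bytes,
 * so an order-N table holds 2^(N-4) HPTEs.
 */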

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        preempt_enable();
}
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
        return kvm->arch.radix;
}

static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
{
        bool radix;

        if (vcpu->arch.nested)
                radix = vcpu->arch.nested->radix;
        else
                radix = kvm_is_radix(vcpu->kvm);

        return radix;
}

int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr);

#define KVM_DEFAULT_HPT_ORDER   24      /* 16MB HPT by default */
#endif

/*
 * Invalid HDSISR value, used to indicate when the HW has not set the
 * register.  Used to work around an erratum.
 */
#define HDSISR_CANARY   0x7fff

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK   0x40UL
#define HPTE_V_ABSENT   0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED        (1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED        HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
        unsigned long tmp, old;
        __be64 be_lockbit, be_bits;

        /*
         * We load/store in native endian, but the HTAB is in big endian.
         * If we byte-swap all the data we apply to the PTE, we are
         * implicitly correct again.
         */
        be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
        be_bits = cpu_to_be64(bits);

        asm volatile("  ldarx   %0,0,%2\n"
                     "  and.    %1,%0,%3\n"
                     "  bne     2f\n"
                     "  or      %0,%0,%4\n"
                     "  stdcx.  %0,0,%2\n"
                     "  beq+    2f\n"
                     "  mr      %1,%3\n"
                     "2:        isync"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
                     : "cc", "memory");
        return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
        hpte_v &= ~HPTE_V_HVLOCK;
        asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
        hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
        hpte_v &= ~HPTE_V_HVLOCK;
        hpte[0] = cpu_to_be64(hpte_v);
}
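
/*
 * e.g. (illustrative sketch): claiming an HPTE before inspecting or
 * modifying it, then releasing it.  Callers typically pass additional
 * bits such as HPTE_V_VALID or HPTE_V_ABSENT to try_lock_hpte() so the
 * lock attempt fails fast on uninteresting entries.
 *
 *      unsigned long v;
 *
 *      while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *              cpu_relax();
 *      v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 *      ... inspect or modify the HPTE ...
 *      unlock_hpte(hptep, v);
 */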

/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
        unsigned int lphi;

        if (!(h & HPTE_V_LARGE))
                return 12;      /* 4kB */
        lphi = (l >> 16) & 0xf;
        switch ((l >> 12) & 0xf) {
        case 0:
                return !lphi ? 24 : 0;          /* 16MB */
        case 1:
                return 16;                      /* 64kB */
        case 3:
                return !lphi ? 34 : 0;          /* 16GB */
        case 7:
                return (16 << 8) + 12;          /* 64kB in 4kB */
        case 8:
                if (!lphi)
                        return (24 << 8) + 16;  /* 16MB in 64kB */
                if (lphi == 3)
                        return (24 << 8) + 12;  /* 16MB in 4kB */
                break;
        }
        return 0;
}
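
/*
 * The return value packs the base page shift in the low byte and, when a
 * mixed page size is in use, the actual page shift in the next byte.
 * e.g. (24 << 8) + 16 describes 16MB actual pages in a 64kB base page
 * segment, so the helpers below yield a base page shift of 16 and an
 * actual page shift of 24.
 */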

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
        return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
        int tmp = kvmppc_hpte_page_shifts(h, l);

        if (tmp >= 0x100)
                tmp >>= 8;
        return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
        int shift = kvmppc_hpte_actual_page_shift(v, r);

        if (shift)
                return 1ul << shift;
        return 0;
}

static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
        switch (base_shift) {
        case 12:
                switch (actual_shift) {
                case 12:
                        return 0;
                case 16:
                        return 7;
                case 24:
                        return 0x38;
                }
                break;
        case 16:
                switch (actual_shift) {
                case 16:
                        return 1;
                case 24:
                        return 8;
                }
                break;
        case 24:
                return 0;
        }
        return -1;
}
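
/*
 * e.g. kvmppc_pgsize_lp_encoding(12, 24) returns 0x38: the low nibble (8)
 * and the lphi nibble (3) are exactly what kvmppc_hpte_page_shifts() above
 * decodes back to (24 << 8) + 12, i.e. a 16MB page in a 4kB base segment.
 */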

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
                                             unsigned long pte_index)
{
        int a_pgshift, b_pgshift;
        unsigned long rb = 0, va_low, sllp;

        b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
        if (a_pgshift >= 0x100) {
                b_pgshift &= 0xff;
                a_pgshift >>= 8;
        }

        /*
         * Ignore the top 14 bits of va.
         * v has its top two bits covering segment size, hence shift
         * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
         * The AVA field in v also has its lower 23 bits ignored.
         * For base page size 4K we need bits 14..65 (so we must
         * collect an extra 11 bits); for others we need bits 14..14+i.
         */
        /* This covers bits 14..54 of va */
        rb = (v & ~0x7fUL) << 16;               /* AVA field */

        /*
         * The AVA in v has its lower 23 bits cleared; we need to
         * derive those from the PTEG index.
         */
        va_low = pte_index >> 3;
        if (v & HPTE_V_SECONDARY)
                va_low = ~va_low;
        /*
         * Get the vpn bits from va_low by reversing the hashing.
         * In v we have the va with 23 bits dropped and then left
         * shifted by HPTE_V_AVPN_SHIFT (7) bits, so to find the vsid
         * we right shift by (SID_SHIFT - (23 - 7)).
         */
        if (!(v & HPTE_V_1TB_SEG))
                va_low ^= v >> (SID_SHIFT - 16);
        else
                va_low ^= v >> (SID_SHIFT_1T - 16);
        va_low &= 0x7ff;

        if (b_pgshift <= 12) {
                if (a_pgshift > 12) {
                        sllp = (a_pgshift == 16) ? 5 : 4;
                        rb |= sllp << 5;        /*  AP field */
                }
                rb |= (va_low & 0x7ff) << 12;   /* remaining 11 bits of AVA */
        } else {
                int aval_shift;
                /*
                 * Remaining bits of the AVA/LP fields; these also
                 * contain the rr bits of LP.
                 */
                rb |= (va_low << b_pgshift) & 0x7ff000;
                /*
                 * Now clear the LP bits that are not needed, based on
                 * the actual page size.
                 */
                rb &= ~((1ul << a_pgshift) - 1);
                /*
                 * The AVAL field holds bits 58..(77 - base_page_shift)
                 * of the va; we have space for bits 58..64, and missing
                 * bits should be zero filled.  The +1 takes care of the
                 * L bit shift.
                 */
                aval_shift = 64 - (77 - b_pgshift) + 1;
                rb |= ((va_low << aval_shift) & 0xfe);

                rb |= 1;                /* L field */
                rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
        }
        rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;   /* B field */
        return rb;
}
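
/*
 * compute_tlbie_rb() builds the RB operand of a tlbie instruction from the
 * two HPTE doublewords and the PTE index, recovering the low VA bits from
 * the hash bucket.  e.g. (illustrative sketch of a caller):
 *
 *      rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
 *                            pte_index);
 *      ... pass rb as the RB operand of tlbie ...
 */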

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
        return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
        unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

        return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
        if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
                ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
        else
                ptel |= PP_RXRX;
        return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
        unsigned int wimg = hptel & HPTE_R_WIMG;

        /* Handle SAO */
        if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
            cpu_has_feature(CPU_FTR_ARCH_206))
                wimg = HPTE_R_M;

        if (!is_ci)
                return wimg == HPTE_R_M;
        /*
         * If the host mapping is cache inhibited, make sure hptel also
         * has cache inhibited.
         */
        if (wimg & HPTE_R_W) /* FIXME!! is this OK for all guests? */
                return false;
        return !!(wimg & HPTE_R_I);
}

/*
 * If it's present and writable, atomically set dirty and referenced bits and
 * return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
        pte_t old_pte, new_pte = __pte(0);

        while (1) {
                /*
                 * Make sure we don't reload from ptep
                 */
                old_pte = READ_ONCE(*ptep);
                /*
                 * Wait until H_PAGE_BUSY is clear, then set it atomically
                 */
                if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
                        cpu_relax();
                        continue;
                }
                /* If the pte is not present, return a zero pte */
                if (unlikely(!pte_present(old_pte)))
                        return __pte(0);

                new_pte = pte_mkyoung(old_pte);
                if (writing && pte_write(old_pte))
                        new_pte = pte_mkdirty(new_pte);

                if (pte_xchg(ptep, old_pte, new_pte))
                        break;
        }
        return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
        if (key)
                return PP_RWRX <= pp && pp <= PP_RXRX;
        return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
        if (key)
                return pp == PP_RWRW;
        return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
        unsigned long skey;

        skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
                ((hpte_r & HPTE_R_KEY_LO) >> 9);
        return (amr >> (62 - 2 * skey)) & 3;
}
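
/*
 * The AMR holds two permission bits per storage key, with key 0 in the two
 * most significant bits, so key N's bits are extracted by shifting right
 * by (62 - 2*N).  e.g. for skey == 0 this reads amr >> 62, and for the
 * highest key (31) the shift is zero, reading the two least significant
 * bits.
 */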

static inline void lock_rmap(unsigned long *rmap)
{
        do {
                while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
                        cpu_relax();
        } while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
        __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
                                   unsigned long pagesize)
{
        unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

        if (pagesize <= PAGE_SIZE)
                return true;
        return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
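
/*
 * e.g. with a 64kB PAGE_SIZE and a 16MB large page (pagesize == 1 << 24),
 * mask is (1 << 8) - 1, so both the slot's base gfn and its length must
 * be multiples of 256 small pages for 16MB mappings to be usable.
 */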

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
        unsigned long senc = 0;

        if (psize > 0x1000) {
                senc = SLB_VSID_L;
                if (psize == 0x10000)
                        senc |= SLB_VSID_LP_01;
        }
        return senc;
}
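
/*
 * e.g. slb_pgsize_encoding(0x1000000) returns SLB_VSID_L (16M pages use
 * LP encoding 00), while slb_pgsize_encoding(0x10000) returns
 * SLB_VSID_L | SLB_VSID_LP_01 for 64k pages.
 */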

static inline int is_vrma_hpte(unsigned long hpte_v)
{
        return (hpte_v & ~0xffffffUL) ==
                (HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
                                          struct revmap_entry *rev)
{
        if (atomic_read(&kvm->arch.hpte_mod_interest))
                rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
        return rcu_dereference_raw_check(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
extern void kvmhv_radix_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
        /* HPTEs are 2**4 bytes long */
        return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
        /* 128 (2**7) bytes in each HPTEG */
        return (1UL << (hpt->order - 7)) - 1;
}
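
/*
 * e.g. with the default order-24 (16MB) HPT, kvmppc_hpt_npte() is 2^20
 * HPTEs and kvmppc_hpt_mask() is 2^17 - 1, the mask for an HPTE group
 * index (each group is 8 HPTEs, 128 bytes).
 */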

/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
                                  unsigned long npages)
{
        if (npages >= 8)
                memset((char *)map + i / 8, 0xff, npages / 8);
        else
                for (; npages; ++i, --npages)
                        __set_bit_le(i, map);
}

static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
                                         unsigned long npages)
{
        if (npages >= 8)
                memset((char *)map + i / 8, 0xff, npages / 8);
        else
                for (; npages; ++i, --npages)
                        set_bit_le(i, map);
}

static inline u64 sanitize_msr(u64 msr)
{
        msr &= ~MSR_HV;
        msr |= MSR_ME;
        return msr;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
        vcpu->arch.regs.ccr  = vcpu->arch.cr_tm;
        vcpu->arch.regs.xer = vcpu->arch.xer_tm;
        vcpu->arch.regs.link  = vcpu->arch.lr_tm;
        vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
        vcpu->arch.amr = vcpu->arch.amr_tm;
        vcpu->arch.ppr = vcpu->arch.ppr_tm;
        vcpu->arch.dscr = vcpu->arch.dscr_tm;
        vcpu->arch.tar = vcpu->arch.tar_tm;
        memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
               sizeof(vcpu->arch.regs.gpr));
        vcpu->arch.fp  = vcpu->arch.fp_tm;
        vcpu->arch.vr  = vcpu->arch.vr_tm;
        vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
        vcpu->arch.cr_tm  = vcpu->arch.regs.ccr;
        vcpu->arch.xer_tm = vcpu->arch.regs.xer;
        vcpu->arch.lr_tm  = vcpu->arch.regs.link;
        vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
        vcpu->arch.amr_tm = vcpu->arch.amr;
        vcpu->arch.ppr_tm = vcpu->arch.ppr;
        vcpu->arch.dscr_tm = vcpu->arch.dscr;
        vcpu->arch.tar_tm = vcpu->arch.tar;
        memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
               sizeof(vcpu->arch.regs.gpr));
        vcpu->arch.fp_tm  = vcpu->arch.fp;
        vcpu->arch.vr_tm  = vcpu->arch.vr;
        vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
                             unsigned long gpa, unsigned int level,
                             unsigned long mmu_seq, unsigned int lpid,
                             unsigned long *rmapp, struct rmap_nested **n_rmap);
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
                                   struct rmap_nested **n_rmap);
extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
                                           unsigned long clr, unsigned long set,
                                           unsigned long hpa, unsigned long nbytes);
extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
                                const struct kvm_memory_slot *memslot,
                                unsigned long gpa, unsigned long hpa,
                                unsigned long nbytes);

static inline pte_t *
find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
                                unsigned *hshift)
{
        pte_t *pte;

        pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
        return pte;
}

static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
                                            unsigned *hshift)
{
        pte_t *pte;

        VM_WARN(!spin_is_locked(&kvm->mmu_lock),
                "%s called with kvm mmu_lock not held\n", __func__);
        pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);

        return pte;
}

static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
                                       unsigned long ea, unsigned *hshift)
{
        pte_t *pte;

        VM_WARN(!spin_is_locked(&kvm->mmu_lock),
                "%s called with kvm mmu_lock not held\n", __func__);

        if (mmu_notifier_retry(kvm, mmu_seq))
                return NULL;

        pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);

        return pte;
}
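
/*
 * e.g. (illustrative sketch): the usual mmu_notifier pattern around
 * find_kvm_host_pte(); mmu_seq must be sampled before the fault is
 * resolved, and the lookup retried if a notifier invalidation has run
 * since the sample was taken:
 *
 *      mmu_seq = kvm->mmu_notifier_seq;
 *      smp_rmb();
 *      ...
 *      spin_lock(&kvm->mmu_lock);
 *      ptep = find_kvm_host_pte(kvm, mmu_seq, ea, &shift);
 *      if (!ptep)
 *              ... unlock and retry ...
 *      spin_unlock(&kvm->mmu_lock);
 */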

extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
                                        unsigned long ea, unsigned *hshift);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */