linux/arch/arm64/include/asm/kvm_mmu.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *      HYP_VA_MIN = 0  //idmap in upper half
 * else
 *      HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, 39bits VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, so none of this applies in that case.
 */
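
/*
 * Worked example (editorial illustration, not part of the original
 * comment), assuming VA_BITS == 48 and a hypothetical placement of
 * the trampoline page:
 *
 *   T = __pa_symbol(__hyp_idmap_text_start) = 0x0000_0080_0000_0000
 *   T & BIT(47) == 0, so the idmap sits in the bottom half and HYP
 *   shadows the kernel from the top half:
 *
 *   HYP_VA_MIN = 1 << 47 = 0x0000_8000_0000_0000
 *   HYP_VA_MAX = HYP_VA_MIN + (1 << 47) - 1 = 0x0000_ffff_ffff_ffff
 */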

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va      reg
alternative_cb kvm_update_va_mask
        and     \reg, \reg, #1          /* mask with va_mask */
        ror     \reg, \reg, #1          /* rotate to the first tag bit */
        add     \reg, \reg, #0          /* insert the low 12 bits of the tag */
        add     \reg, \reg, #0, lsl 12  /* insert the top 12 bits of the tag */
        ror     \reg, \reg, #63         /* rotate back */
alternative_cb_end
.endm
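
/*
 * Illustrative use only (editorial addition): converting a kernel VA
 * held in x0 before handing it to EL2 would look like
 *
 *      kern_hyp_va     x0
 *
 * The five placeholder instructions above are rewritten at boot by
 * kvm_update_va_mask() with the mask and tag values computed for this
 * system, so the immediates shown here are never executed as-is.
 */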

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

void kvm_update_va_mask(struct alt_instr *alt,
                        __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
        asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
                                    "ror %0, %0, #1\n"
                                    "add %0, %0, #0\n"
                                    "add %0, %0, #0, lsl 12\n"
                                    "ror %0, %0, #63\n",
                                    kvm_update_va_mask)
                     : "+r" (v));
        return v;
}

#define kern_hyp_va(v)  ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
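
/*
 * Illustrative use only (editorial addition, 'vcpu' is a hypothetical
 * variable): a kernel pointer must be converted before it can be
 * dereferenced by the non-VHE EL2 code, e.g.
 *
 *      struct kvm_vcpu *hyp_vcpu = kern_hyp_va(vcpu);
 *
 * The typeof() cast in the macro preserves the pointer type of its
 * argument.
 */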

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40bits.
 */
#define KVM_PHYS_SHIFT  (40)

#define kvm_phys_shift(kvm)             VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)              (_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)              (kvm_phys_size(kvm) - _AC(1, ULL))
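
/*
 * Example (editorial illustration): with the default 40 bit IPA size,
 * kvm_phys_size() is 1ULL << 40 (a 1TB guest IPA space) and
 * kvm_phys_mask() is 0xff_ffff_ffff.
 */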

static inline bool kvm_page_empty(void *ptr)
{
        struct page *ptr_page = virt_to_page(ptr);
        return page_count(ptr_page) == 1;
}

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
                           void __iomem **kaddr,
                           void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
                             void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_mk_pmd(ptep)                                        \
        __pmd(__phys_to_pmd_val(__pa(ptep)) | PMD_TYPE_TABLE)
#define kvm_mk_pud(pmdp)                                        \
        __pud(__phys_to_pud_val(__pa(pmdp)) | PMD_TYPE_TABLE)
#define kvm_mk_p4d(pmdp)                                        \
        __p4d(__phys_to_p4d_val(__pa(pmdp)) | PUD_TYPE_TABLE)

#define kvm_set_pud(pudp, pud)          set_pud(pudp, pud)

#define kvm_pfn_pte(pfn, prot)          pfn_pte(pfn, prot)
#define kvm_pfn_pmd(pfn, prot)          pfn_pmd(pfn, prot)
#define kvm_pfn_pud(pfn, prot)          pfn_pud(pfn, prot)

#define kvm_pud_pfn(pud)                pud_pfn(pud)

#define kvm_pmd_mkhuge(pmd)             pmd_mkhuge(pmd)
#define kvm_pud_mkhuge(pud)             pud_mkhuge(pud)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= PTE_S2_RDWR;
        return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
        pmd_val(pmd) |= PMD_S2_RDWR;
        return pmd;
}

static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
{
        pud_val(pud) |= PUD_S2_RDWR;
        return pud;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
        pte_val(pte) &= ~PTE_S2_XN;
        return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
        pmd_val(pmd) &= ~PMD_S2_XN;
        return pmd;
}

static inline pud_t kvm_s2pud_mkexec(pud_t pud)
{
        pud_val(pud) &= ~PUD_S2_XN;
        return pud;
}

static inline void kvm_set_s2pte_readonly(pte_t *ptep)
{
        pteval_t old_pteval, pteval;

        pteval = READ_ONCE(pte_val(*ptep));
        do {
                old_pteval = pteval;
                pteval &= ~PTE_S2_RDWR;
                pteval |= PTE_S2_RDONLY;
                pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
        } while (pteval != old_pteval);
}

static inline bool kvm_s2pte_readonly(pte_t *ptep)
{
        return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *ptep)
{
        return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
{
        kvm_set_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
{
        return kvm_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
{
        return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}

static inline void kvm_set_s2pud_readonly(pud_t *pudp)
{
        kvm_set_s2pte_readonly((pte_t *)pudp);
}

static inline bool kvm_s2pud_readonly(pud_t *pudp)
{
        return kvm_s2pte_readonly((pte_t *)pudp);
}

static inline bool kvm_s2pud_exec(pud_t *pudp)
{
        return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN);
}

static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
{
        return pud_mkyoung(pud);
}

static inline bool kvm_s2pud_young(pud_t pud)
{
        return pud_young(pud);
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

#ifdef __PAGETABLE_P4D_FOLDED
#define hyp_p4d_table_empty(p4dp) (0)
#else
#define hyp_p4d_table_empty(p4dp) kvm_page_empty(p4dp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)    __flush_dcache_area((a), (l))

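/*
 * Editorial note: 0b101 below covers SCTLR_EL1.M (bit 0, MMU enable)
 * and SCTLR_EL1.C (bit 2, data cache enable); the caches are only
 * considered enabled when both bits are set.
 */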
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
        return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
        void *va = page_address(pfn_to_page(pfn));

        /*
         * With FWB, we ensure that the guest always accesses memory using
         * cacheable attributes, and we don't have to clean to PoC when
         * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
         * PoU is not required either in this case.
         */
        if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
                return;

        kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
                                                  unsigned long size)
{
        if (icache_is_aliasing()) {
                /* any kind of VIPT cache */
                __flush_icache_all();
        } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
                /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
                void *va = page_address(pfn_to_page(pfn));

                invalidate_icache_range((unsigned long)va,
                                        (unsigned long)va + size);
        }
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
        if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
                struct page *page = pte_page(pte);
                kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
        }
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
        if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
                struct page *page = pmd_page(pmd);
                kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
        }
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
        if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
                struct page *page = pud_page(pud);
                kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
        }
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
        return __cpu_uses_extended_idmap_level();
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
        return idmap_ptrs_per_pgd;
}

/*
 * Can't use pgd_populate here, because the extended idmap adds an extra level
 * above CONFIG_PGTABLE_LEVELS (which is 2 or 3 if we're using the extended
 * idmap), and pgd_populate is only available if CONFIG_PGTABLE_LEVELS = 4.
 */
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
                                       pgd_t *hyp_pgd,
                                       pgd_t *merged_hyp_pgd,
                                       unsigned long hyp_idmap_start)
{
        int idmap_idx;
        u64 pgd_addr;

        /*
         * Use the first entry to access the HYP mappings. It is
         * guaranteed to be free, otherwise we wouldn't use an
         * extended idmap.
         */
        VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
        pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
        merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);

        /*
         * Create another extended level entry that points to the boot HYP map,
         * which contains an ID mapping of the HYP init code. We essentially
         * merge the boot and runtime HYP maps by doing so, but they don't
         * overlap anyway, so this is fine.
         */
        idmap_idx = hyp_idmap_start >> VA_BITS;
        VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
        pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
        merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
}

static inline unsigned int kvm_get_vmid_bits(void)
{
        int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

        return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
                                      gpa_t gpa, void *data, unsigned long len)
{
        int srcu_idx = srcu_read_lock(&kvm->srcu);
        int ret = kvm_read_guest(kvm, gpa, data, len);

        srcu_read_unlock(&kvm->srcu, srcu_idx);

        return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
                                       const void *data, unsigned long len)
{
        int srcu_idx = srcu_read_lock(&kvm->srcu);
        int ret = kvm_write_guest(kvm, gpa, data, len);

        srcu_read_unlock(&kvm->srcu, srcu_idx);

        return ret;
}
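
/*
 * Illustrative use only (editorial addition; 'gpa' and 'buf' are
 * hypothetical): callers such as the vgic use these helpers to access
 * guest memory outside of a vcpu context, e.g.
 *
 *      ret = kvm_read_guest_lock(kvm, gpa, buf, sizeof(buf));
 */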

#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
 *   hardening sequence is placed in one of the vector slots, which is
 *   executed before jumping to the real vectors.
 *
 * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
 *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
 *   hardening sequence is mapped next to the idmap page, and executed
 *   before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
 *   executed before jumping to the real vectors.
 *
 * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
#include <asm/mmu.h>

extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;

/*  This is called on both VHE and !VHE systems */
static inline void *kvm_get_hyp_vector(void)
{
        struct bp_hardening_data *data = arm64_get_bp_hardening_data();
        void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
        int slot = -1;

        if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
                vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
                slot = data->hyp_vectors_slot;
        }

        if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
                vect = __kvm_bp_vect_base;
                if (slot == -1)
                        slot = __kvm_harden_el2_vector_slot;
        }

        if (slot != -1)
                vect += slot * SZ_2K;

        return vect;
}
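
/*
 * Editorial note: each slot in __bp_harden_hyp_vecs is a complete 2kB
 * AArch64 vector table, hence the slot * SZ_2K offset applied above.
 */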

/*  This is only called on a !VHE system */
static inline int kvm_map_vectors(void)
{
        /*
         * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
         * HEL2 = ARM64_HARDEN_EL2_VECTORS
         *
         * !HBP + !HEL2 -> use direct vectors
         *  HBP + !HEL2 -> use hardened vectors in place
         * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
         *  HBP +  HEL2 -> use hardened vectors and use exec mapping
         */
        if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
                __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
                __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
        }

        if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
                phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
                unsigned long size = __BP_HARDEN_HYP_VECS_SZ;

                /*
                 * Always allocate a spare vector slot, as we don't
                 * know yet which CPUs have a BP hardening slot that
                 * we can reuse.
                 */
                __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
                BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
                return create_hyp_exec_mappings(vect_pa, size,
                                                &__kvm_bp_vect_base);
        }

        return 0;
}
#else
static inline void *kvm_get_hyp_vector(void)
{
        return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
}

static inline int kvm_map_vectors(void)
{
        return 0;
}
#endif

#ifdef CONFIG_ARM64_SSBD
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

static inline int hyp_map_aux_data(void)
{
        int cpu, err;

        for_each_possible_cpu(cpu) {
                u64 *ptr;

                ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
                err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
                if (err)
                        return err;
        }
        return 0;
}
#else
static inline int hyp_map_aux_data(void)
{
        return 0;
}
#endif

#define kvm_phys_to_vttbr(addr)         phys_to_ttbr(addr)

/*
 * Get the magic number 'x' for VTTBR:BADDR of this KVM instance.
 * With v8.2 LVA extensions, 'x' should be a minimum of 6 with
 * 52bit IPS.
 */
static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels)
{
        int x = ARM64_VTTBR_X(ipa_shift, levels);

        return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x;
}

static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels)
{
        unsigned int x = arm64_vttbr_x(ipa_shift, levels);

        return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x);
}

static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
{
        return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
}

static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
        struct kvm_vmid *vmid = &mmu->vmid;
        u64 vmid_field, baddr;
        u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

        baddr = mmu->pgd_phys;
        vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
        return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
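
/*
 * Editorial note: the value built above packs the stage 2 pgd base
 * address (via phys_to_ttbr()), the VMID in the upper VTTBR_EL2 bits
 * and, when the CPU supports Common-Not-Private translations, the CnP
 * bit.
 */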

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
{
        write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
        write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

        /*
         * ARM errata 1165522 and 1530923 require the actual execution of the
         * above before we can switch to the EL1/EL0 translation regime used by
         * the guest.
         */
        asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */