linux/arch/arm64/include/asm/kvm_mmu.h
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>

/*
 * As we only have the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 */
#define HYP_PAGE_OFFSET_SHIFT   VA_BITS
#define HYP_PAGE_OFFSET_MASK    ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET         (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
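
/*
 * Worked example (VA_BITS == 39 is an assumed configuration, not a
 * fixed property): HYP_PAGE_OFFSET_MASK == 0x7fffffffff, so the kernel
 * VA 0xffffffc000100000 masks down to the HYP VA 0x4000100000.
 */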

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA           (HYP_PAGE_OFFSET_MASK & PAGE_MASK)
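
/*
 * Under the same assumed configuration (4K pages, VA_BITS == 39):
 * TRAMPOLINE_VA == 0x7fffffffff & ~0xfff == 0x7ffffff000, i.e. the
 * last page of the HYP VA range.
 */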

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels in addition to the PGD and potentially the PUD which are
 * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
 * tables use one level of tables less than the kernel).
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define KVM_MMU_CACHE_MIN_PAGES 1
#else
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif
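
/*
 * Rationale sketch (for the usual configurations of this era): a
 * stage-2 fault may have to allocate one table at each level below the
 * pre-allocated top level, i.e. up to a PMD and a PTE table with 4K
 * pages, but only a PTE table with 64K pages.
 */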

#ifdef __ASSEMBLY__

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va      reg
        and     \reg, \reg, #HYP_PAGE_OFFSET_MASK
.endm
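
/*
 * Illustrative use from assembly (a sketch; x0 holding a kernel VA is
 * an assumption):
 *
 *	kern_hyp_va	x0	// x0 now holds the equivalent HYP VA
 */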

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#define KERN_TO_HYP(kva)        ((unsigned long)(kva) - PAGE_OFFSET + HYP_PAGE_OFFSET)
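
/*
 * Worked example (again assuming VA_BITS == 39, so PAGE_OFFSET ==
 * 0xffffffc000000000 and HYP_PAGE_OFFSET == 0x4000000000):
 *
 *	KERN_TO_HYP(0xffffffc000100000) == 0x4000100000
 *
 * which matches what the kern_hyp_va assembly macro computes by masking.
 */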

/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT  (40)
#define KVM_PHYS_SIZE   (1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK   (KVM_PHYS_SIZE - 1UL)
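
/* A 40-bit IPA gives KVM_PHYS_SIZE == 1TB of guest physical address space. */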

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)          set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)          set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
        pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
        pmd_val(*pmd) |= PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
        pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
        return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
        pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
        return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
}

#define kvm_pgd_addr_end(addr, end)     pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)     pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)     pmd_addr_end(addr, end)

/*
 * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can
 * address the entire IPA input range with a single pgd entry, so one
 * entry is all we need.  Note that in this case, the pgd is actually not
 * used by the MMU for Stage-2 translations, but is merely a fake pgd
 * used as a data structure for the kernel pgtable macros to work.
 */
#if PGDIR_SHIFT > KVM_PHYS_SHIFT
#define PTRS_PER_S2_PGD_SHIFT   0
#else
#define PTRS_PER_S2_PGD_SHIFT   (KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD         (1 << PTRS_PER_S2_PGD_SHIFT)
#define S2_PGD_ORDER            get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))

#define kvm_pgd_index(addr)     (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
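
/*
 * Worked example (assuming 4K pages with VA_BITS == 39, so
 * PGDIR_SHIFT == 30): PTRS_PER_S2_PGD_SHIFT == 40 - 30 == 10, giving
 * PTRS_PER_S2_PGD == 1024 entries, i.e. an 8K stage-2 pgd
 * (S2_PGD_ORDER == 1).
 */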

/*
 * If we are concatenating first level stage-2 page tables, we have at
 * most 16 pointers in the fake PGD, because that's what the architecture
 * allows.  In this case, (4 - CONFIG_PGTABLE_LEVELS) represents the
 * first level for the host, and we add 1 to go to the next level (which
 * uses concatenation) for the stage-2 tables.
 */
#if PTRS_PER_S2_PGD <= 16
#define KVM_PREALLOC_LEVEL      (4 - CONFIG_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL      (0)
#endif
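
/*
 * Worked example (assuming 4K pages with VA_BITS == 48, so
 * CONFIG_PGTABLE_LEVELS == 4 and PGDIR_SHIFT == 39): PTRS_PER_S2_PGD ==
 * 2 <= 16, so KVM_PREALLOC_LEVEL == 1. The hardware then walks stage-2
 * starting from two concatenated level 1 (PUD) pages, and the 2-entry
 * fake PGD only exists so the generic pgtable macros have something to
 * traverse.
 */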

static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
        pgd_t *pgd = kvm->arch.pgd;
        pud_t *pud;

        if (KVM_PREALLOC_LEVEL == 0)
                return pgd;

        pud = pud_offset(pgd, 0);
        if (KVM_PREALLOC_LEVEL == 1)
                return pud;

        BUG_ON(KVM_PREALLOC_LEVEL != 2);
        return pmd_offset(pud, 0);
}

static inline unsigned int kvm_get_hwpgd_size(void)
{
        if (KVM_PREALLOC_LEVEL > 0)
                return PTRS_PER_S2_PGD * PAGE_SIZE;
        return PTRS_PER_S2_PGD * sizeof(pgd_t);
}

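/*
 * The MMU code elevates the refcount of a page-table page once for each
 * entry it installs, so a page_count() of exactly 1 (the allocation
 * reference) means the table holds no entries and may be freed.
 */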
static inline bool kvm_page_empty(void *ptr)
{
        struct page *ptr_page = virt_to_page(ptr);
        return page_count(ptr_page) == 1;
}

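/*
 * The *_table_empty() checks below also refuse to report a level at or
 * above KVM_PREALLOC_LEVEL as freeable for a guest (kvm != NULL), since
 * those tables are pre-allocated and torn down separately; hyp tables
 * pass kvm == NULL and are always eligible.
 */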
#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define kvm_pmd_table_empty(kvm, pmdp) (0)
#else
#define kvm_pmd_table_empty(kvm, pmdp) \
        (kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define kvm_pud_table_empty(kvm, pudp) (0)
#else
#define kvm_pud_table_empty(kvm, pudp) \
        (kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)    __flush_dcache_area((a), (l))

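/* SCTLR_EL1: bit 0 is M (MMU enable), bit 2 is C (data cache enable). */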
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
        return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
                                               unsigned long size,
                                               bool ipa_uncached)
{
        void *va = page_address(pfn_to_page(pfn));

        if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
                kvm_flush_dcache_to_poc(va, size);

        if (!icache_is_aliasing()) {            /* PIPT */
                flush_icache_range((unsigned long)va,
                                   (unsigned long)va + size);
        } else if (!icache_is_aivivt()) {       /* non ASID-tagged VIVT */
                /* any kind of VIPT cache */
                __flush_icache_all();
        }
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
        struct page *page = pte_page(pte);
        kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
        struct page *page = pmd_page(pmd);
        kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
        struct page *page = pud_page(pud);
        kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)             __virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
        return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
                                       pgd_t *hyp_pgd,
                                       pgd_t *merged_hyp_pgd,
                                       unsigned long hyp_idmap_start)
{
        int idmap_idx;

        /*
         * Use the first entry to access the HYP mappings. It is
         * guaranteed to be free, otherwise we wouldn't use an
         * extended idmap.
         */
        VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
        merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

        /*
         * Create another extended level entry that points to the boot HYP map,
         * which contains an ID mapping of the HYP init code. We essentially
         * merge the boot and runtime HYP maps by doing so, but they don't
         * overlap anyway, so this is fine.
         */
        idmap_idx = hyp_idmap_start >> VA_BITS;
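        /*
         * For illustration (hypothetical values): with VA_BITS == 39 and
         * hyp_idmap_start == 0x8080000000, idmap_idx == 1.
         */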
        VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
        merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */