linux/arch/arm/include/asm/kvm_mmu.h
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/page.h>

/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define kern_hyp_va(kva)	(kva)
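
/*
 * Hedged usage sketch (illustrative only, not used elsewhere in this
 * header): because the conversion is an identity on 32-bit ARM, a HYP
 * "translation" of any kernel VA simply returns it unchanged, e.g.
 *
 *	unsigned long hyp_va = kern_hyp_va((unsigned long)kernel_addr);
 */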

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table
 * translation levels.
 */
#define KVM_MMU_CACHE_MIN_PAGES	2

#ifndef __ASSEMBLY__

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/kvm_hyp.h>
#include <asm/pgalloc.h>
#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);
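
/*
 * Hedged usage sketch (hypothetical device and addresses, shown only to
 * illustrate the prototype above): wiring a 64 KiB passthrough MMIO
 * region read/write into the guest IPA space would look like
 *
 *	ret = kvm_phys_addr_ioremap(kvm, guest_ipa, host_pa, SZ_64K, true);
 */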

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

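/*
 * Descriptive note (added comment): the dsb(ishst) in the two setters
 * below publishes the updated descriptor to the inner-shareable domain,
 * so the stage-2 walker and other CPUs observe the new entry before any
 * subsequent TLB maintenance is issued.
 */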
static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
	*pmd = new_pmd;
	dsb(ishst);
}

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	*pte = new_pte;
	dsb(ishst);
}

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= L_PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= L_PMD_S2_RDWR;
	return pmd;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
	pte_val(pte) &= ~L_PTE_XN;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_SECT_XN;
	return pmd;
}
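
/*
 * Illustrative sketch (hypothetical helper, not part of the mainline
 * header): the mk* helpers above compose; e.g. a handler wanting a
 * writable, executable stage-2 PTE could build it like this before
 * installing it with kvm_set_pte().
 */
static inline pte_t example_s2pte_mkwrite_exec(pte_t pte)
{
	/* Grant read/write (HAP bits), then clear XN to allow execution. */
	return kvm_s2pte_mkexec(kvm_s2pte_mkwrite(pte));
}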

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *pte)
{
	return !(pte_val(*pte) & L_PTE_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_exec(pmd_t *pmd)
{
	return !(pmd_val(*pmd) & PMD_SECT_XN);
}

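/*
 * Descriptive note (added comment): stage-2 table pages are refcounted;
 * the mapping code takes a page reference for each live entry, so a
 * count of exactly 1 (the allocation's own reference) means the table
 * holds no entries and may be freed.
 */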
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) false

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#define hyp_pud_table_empty(pudp) false

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))

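/*
 * Descriptive note (added comment): the 0b101 mask below tests SCTLR.M
 * (bit 0, MMU enable) and SCTLR.C (bit 2, data cache enable); the guest
 * is only considered to have caches enabled when both bits are set.
 */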
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	/*
	 * Clean the dcache to the Point of Coherency.
	 *
	 * We need to do this through a kernel mapping (using the
	 * user-space mapping has proved to be the wrong
	 * solution). For that, we need to kmap one page at a time,
	 * and iterate over the range.
	 */

	VM_BUG_ON(size & ~PAGE_MASK);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	u32 iclsz;

	/*
	 * If we are going to insert an instruction page and the icache is
	 * either VIPT or PIPT, there is a potential problem where the host
	 * (or another VM) may have used the same page as this guest, and we
	 * read incorrect data from the icache.  If we're using a PIPT cache,
	 * we can invalidate just that page, but if we are using a VIPT cache
	 * we need to invalidate the entire icache - damn shame - as written
	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
	 *
	 * VIVT caches are tagged using both the ASID and the VMID and do not
	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
	 */

	VM_BUG_ON(size & ~PAGE_MASK);

	if (icache_is_vivt_asid_tagged())
		return;

	if (!icache_is_pipt()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
		return;
	}

	/*
	 * CTR IminLine contains Log2 of the number of words in the
	 * cache line, so we can get the number of words as
	 * 2 << (IminLine - 1).  To get the number of bytes, we
	 * multiply by 4 (the number of bytes in a 32-bit word), and
	 * get 4 << (IminLine).
	 */
	iclsz = 4 << (read_cpuid(CPUID_CACHETYPE) & 0xf);

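	/*
	 * Worked example (added comment): a core with 64-byte icache
	 * lines reports IminLine = 4 (16 words), giving
	 * iclsz = 4 << 4 = 64, so the loop below strides through each
	 * page 64 bytes at a time.
	 */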
	while (size) {
		void *va = kmap_atomic_pfn(pfn);
		void *end = va + PAGE_SIZE;
		void *addr = va;

		do {
			write_sysreg(addr, ICIMVAU);
			addr += iclsz;
		} while (addr < end);

		dsb(ishst);
		isb();

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}

	/*
	 * Check if we need to invalidate the BTB: ID_MMFR1[31:28] is the
	 * branch predictor field, and a value of 4 means the branch
	 * predictor requires no flushing; for anything else, invalidate
	 * all branch predictors inner-shareable (BPIALLIS).
	 */
	if ((read_cpuid_ext(CPUID_EXT_MMFR1) >> 28) != 4) {
		write_sysreg(0, BPIALLIS);
		dsb(ishst);
		isb();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	void *va = kmap_atomic(pte_page(pte));

	kvm_flush_dcache_to_poc(va, PAGE_SIZE);

	kunmap_atomic(va);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	unsigned long size = PMD_SIZE;
	kvm_pfn_t pfn = pmd_pfn(pmd);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		pfn++;
		size -= PAGE_SIZE;

		kunmap_atomic(va);
	}
}

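/*
 * Descriptive note (added comment): with the 3-level LPAE stage-2
 * layout used on 32-bit ARM there are no PUD-level block mappings,
 * so there is nothing to flush here.
 */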
static inline void __kvm_flush_dcache_pud(pud_t pud)
{
}

#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return false;
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
	return PTRS_PER_PGD;
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start) { }

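/* The VMID field of the 32-bit VTTBR is architecturally 8 bits wide. */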
static inline unsigned int kvm_get_vmid_bits(void)
{
	return 8;
}

static inline void *kvm_get_hyp_vector(void)
{
	return kvm_ksym_ref(__kvm_hyp_vector);
}

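/*
 * Descriptive note (added comment): remapping the HYP vectors is only
 * needed by the arm64 branch-predictor hardening; on 32-bit ARM this
 * is a no-op that simply reports success.
 */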
static inline int kvm_map_vectors(void)
{
	return 0;
}

#define kvm_phys_to_vttbr(addr)		(addr)

#endif	/* !__ASSEMBLY__ */

#endif /* __ARM_KVM_MMU_H__ */