linux/arch/arm64/kvm/hyp/nvhe/mm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

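/*
 * The hyp stage-1 page-table, the lock serialising updates to it, and the
 * base of the "private" VA range used for dynamic hyp mappings. See
 * hyp_create_idmap() below for how __io_map_base is placed.
 */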
struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;
u64 __io_map_base;

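/*
 * A private copy of the host's memblock regions, handed over before the
 * hypervisor is initialised so it can reason about what is and isn't memory.
 */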
struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

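/* Establish a mapping in the hyp stage-1 page-table, taking the PGD lock. */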
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
                                  unsigned long phys, enum kvm_pgtable_prot prot)
{
        int err;

        hyp_spin_lock(&pkvm_pgd_lock);
        err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
        hyp_spin_unlock(&pkvm_pgd_lock);

        return err;
}

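/*
 * Carve a range out of the private VA area and map @phys there. On success
 * the returned address carries the sub-page offset of @phys; on failure it
 * is an ERR_PTR() value cast to unsigned long. A minimal caller sketch,
 * mirroring hyp_map_vectors() below:
 *
 *	addr = __pkvm_create_private_mapping(phys, size, PAGE_HYP);
 *	if (IS_ERR_OR_NULL((void *)addr))
 *		return PTR_ERR((void *)addr);
 */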
unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
                                            enum kvm_pgtable_prot prot)
{
        unsigned long addr;
        int err;

        hyp_spin_lock(&pkvm_pgd_lock);

        size = PAGE_ALIGN(size + offset_in_page(phys));
        addr = __io_map_base;
        __io_map_base += size;

        /* Are we overflowing on the vmemmap? */
        if (__io_map_base > __hyp_vmemmap) {
                __io_map_base -= size;
                addr = (unsigned long)ERR_PTR(-ENOMEM);
                goto out;
        }

        err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot);
        if (err) {
                addr = (unsigned long)ERR_PTR(err);
                goto out;
        }

        addr = addr + offset_in_page(phys);
out:
        hyp_spin_unlock(&pkvm_pgd_lock);

        return addr;
}

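/*
 * Map a range of hyp linear-map VAs into the hyp stage-1 page-table, one
 * page at a time. The caller must already hold pkvm_pgd_lock.
 */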
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
        unsigned long start = (unsigned long)from;
        unsigned long end = (unsigned long)to;
        unsigned long virt_addr;
        phys_addr_t phys;

        hyp_assert_lock_held(&pkvm_pgd_lock);

        start = start & PAGE_MASK;
        end = PAGE_ALIGN(end);

        for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
                int err;

                phys = hyp_virt_to_phys((void *)virt_addr);
                err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
                                          phys, prot);
                if (err)
                        return err;
        }

        return 0;
}

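/* Same as pkvm_create_mappings_locked(), but takes the PGD lock itself. */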
int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
        int ret;

        hyp_spin_lock(&pkvm_pgd_lock);
        ret = pkvm_create_mappings_locked(from, to, prot);
        hyp_spin_unlock(&pkvm_pgd_lock);

        return ret;
}

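/*
 * Map the vmemmap entries covering the [phys, phys + size) range of memory
 * onto the pages starting at @back, giving the hyp struct hyp_page metadata
 * for that range.
 */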
int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
{
        unsigned long start, end;

        hyp_vmemmap_range(phys, size, &start, &end);

        return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
}

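/* Private-range alias of __bp_harden_hyp_vecs, set up by hyp_map_vectors(). */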
static void *__hyp_bp_vect_base;

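/*
 * Point this CPU's hyp vector base at the slot matching the requested
 * Spectre mitigation vector.
 */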
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
        void *vector;

        switch (slot) {
        case HYP_VECTOR_DIRECT: {
                vector = __kvm_hyp_vector;
                break;
        }
        case HYP_VECTOR_SPECTRE_DIRECT: {
                vector = __bp_harden_hyp_vecs;
                break;
        }
        case HYP_VECTOR_INDIRECT:
        case HYP_VECTOR_SPECTRE_INDIRECT: {
                vector = (void *)__hyp_bp_vect_base;
                break;
        }
        default:
                return -EINVAL;
        }

        vector = __kvm_vector_slot2addr(vector, slot);
        *this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

        return 0;
}

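/*
 * Map the bp-hardening vectors into the private range so that
 * pkvm_cpu_set_vector() can use them; a no-op on CPUs that don't need
 * the Spectre-v3a mitigation.
 */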
int hyp_map_vectors(void)
{
        phys_addr_t phys;
        void *bp_base;

        if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
                return 0;

        phys = __hyp_pa(__bp_harden_hyp_vecs);
        bp_base = (void *)__pkvm_create_private_mapping(phys,
                                                        __BP_HARDEN_HYP_VECS_SZ,
                                                        PAGE_HYP_EXEC);
        if (IS_ERR_OR_NULL(bp_base))
                return PTR_ERR(bp_base);

        __hyp_bp_vect_base = bp_base;

        return 0;
}

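/*
 * Identity-map the hyp idmap text and, while we know where the idmap lives,
 * choose the bases of the private IO range and of the vmemmap so that
 * neither can overlap it.
 */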
int hyp_create_idmap(u32 hyp_va_bits)
{
        unsigned long start, end;

        start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
        start = ALIGN_DOWN(start, PAGE_SIZE);

        end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
        end = ALIGN(end, PAGE_SIZE);

        /*
         * One half of the VA space is reserved to linearly map portions of
         * memory -- see va_layout.c for more details. The other half of the VA
         * space contains the trampoline page, and needs some care. Split that
         * second half in two and find the quarter of VA space not conflicting
         * with the idmap to place the IOs and the vmemmap. IOs use the lower
         * half of the quarter and the vmemmap the upper half.
         */
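        /*
         * Worked example (illustrative): with hyp_va_bits == 48, the code
         * below isolates bit 46 of the idmap address and inverts it. If
         * bit 46 of start is clear, __io_map_base = BIT(46) and
         * __hyp_vmemmap = BIT(46) | BIT(45); if it is set, __io_map_base
         * is 0 and __hyp_vmemmap = BIT(45). Either way, the chosen quarter
         * differs from the idmap's in bit 46 and so cannot contain it.
         */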
        __io_map_base = start & BIT(hyp_va_bits - 2);
        __io_map_base ^= BIT(hyp_va_bits - 2);
        __hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

        return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}