linux/arch/arm64/kvm/hyp/reserved_mem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/memblock.h>
#include <linux/sort.h>

#include <asm/kvm_host.h>

#include <nvhe/memory.h>
#include <nvhe/mm.h>

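/*
 * hyp_memory and hyp_memblock_nr are defined in the nVHE hypervisor
 * object; kvm_nvhe_sym() resolves the host kernel's aliases for those
 * symbols so they can be populated here at boot, before the hypervisor
 * itself is initialised.
 */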
static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);

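/* Base address and size of the memory region reserved for the hypervisor. */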
phys_addr_t hyp_mem_base;
phys_addr_t hyp_mem_size;

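/* Comparator for sort(): order memblock regions by ascending base address. */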
static int cmp_hyp_memblock(const void *p1, const void *p2)
{
        const struct memblock_region *r1 = p1;
        const struct memblock_region *r2 = p2;

        return r1->base < r2->base ? -1 : (r1->base > r2->base);
}

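/* Sort the hyp copy of the memblock regions by base address. */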
static void __init sort_memblock_regions(void)
{
        sort(hyp_memory,
             *hyp_memblock_nr_ptr,
             sizeof(struct memblock_region),
             cmp_hyp_memblock,
             NULL);
}

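/*
 * Snapshot the host's memblock regions into the hyp array (capped at
 * HYP_MEMBLOCK_REGIONS entries) and sort them by base address so the
 * hypervisor can later look up ranges efficiently.
 */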
static int __init register_memblock_regions(void)
{
        struct memblock_region *reg;

        for_each_mem_region(reg) {
                if (*hyp_memblock_nr_ptr >= HYP_MEMBLOCK_REGIONS)
                        return -ENOMEM;

                hyp_memory[*hyp_memblock_nr_ptr] = *reg;
                (*hyp_memblock_nr_ptr)++;
        }
        sort_memblock_regions();

        return 0;
}

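/*
 * Carve out, early in boot, the memory the protected (pKVM) hypervisor
 * will need: its stage-1 page-table, the host stage-2 page-table, and a
 * vmemmap of struct hyp_page entries covering the whole reservation.
 * Only relevant when booting in protected nVHE mode.
 */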
void __init kvm_hyp_reserve(void)
{
        u64 nr_pages, prev, hyp_mem_pages = 0;
        int ret;

        if (!is_hyp_mode_available() || is_kernel_in_hyp_mode())
                return;

        if (kvm_get_mode() != KVM_MODE_PROTECTED)
                return;

        ret = register_memblock_regions();
        if (ret) {
                *hyp_memblock_nr_ptr = 0;
                kvm_err("Failed to register hyp memblocks: %d\n", ret);
                return;
        }

        hyp_mem_pages += hyp_s1_pgtable_pages();
        hyp_mem_pages += host_s2_pgtable_pages();

        /*
         * The hyp_vmemmap needs to be backed by pages, but these pages
         * themselves need to be present in the vmemmap, so compute the number
         * of pages needed by looking for a fixed point.
         */
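        /*
         * Each pass computes the vmemmap and page-table overhead needed to
         * cover hyp_mem_pages plus the previous overhead estimate; each
         * overhead page adds only a small fraction of a page of further
         * overhead, so the estimate converges after a few iterations.
         */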
        nr_pages = 0;
        do {
                prev = nr_pages;
                nr_pages = hyp_mem_pages + prev;
                nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE);
                nr_pages += __hyp_pgtable_max_pages(nr_pages);
        } while (nr_pages != prev);
        hyp_mem_pages += nr_pages;

        /*
         * Try to allocate a PMD-aligned region to reduce TLB pressure once
         * this is unmapped from the host stage-2, and fall back to PAGE_SIZE
         * alignment if that fails.
         */
        hyp_mem_size = hyp_mem_pages << PAGE_SHIFT;
        hyp_mem_base = memblock_phys_alloc(ALIGN(hyp_mem_size, PMD_SIZE),
                                           PMD_SIZE);
        if (!hyp_mem_base)
                hyp_mem_base = memblock_phys_alloc(hyp_mem_size, PAGE_SIZE);
        else
                hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);

        if (!hyp_mem_base) {
                kvm_err("Failed to reserve hyp memory\n");
                return;
        }

        kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
                 hyp_mem_base);
}