arch/powerpc/kvm/book3s_hv_builtin.c
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
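
/*
 * The KVM CMA area is managed in chunks of 2^18 bytes (256 KiB);
 * KVM_CMA_CHUNK_ORDER - PAGE_SHIFT is passed to cma_declare_contiguous()
 * below as the per-bit allocation granularity.
 */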
#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * must be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash page table allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;
/*
 * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
 * Each RMA has to be physically contiguous and of a size that the
 * hardware supports.  PPC970 and POWER7 support 64MB, 128MB and 256MB,
 * and other larger sizes.  Since we are unlikely to be able to allocate
 * that much physically contiguous memory after the system is up and
 * running, we preallocate a set of RMAs in early boot using CMA.
 * The RMA size (kvm_rma_pages) must be a power of 2.
 */
unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;	/* 128MB */
EXPORT_SYMBOL_GPL(kvm_rma_pages);
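
/* CMA area from which both RMAs and guest hash page tables are allocated. */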
static struct cma *kvm_cma;

/*
 * Work out the RMLS (real mode limit selector) field value for a
 * given RMA size.  Assumes POWER7 or PPC970.
 */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
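
/*
 * Parse the "kvm_rma_size=" kernel command line option, e.g.
 * "kvm_rma_size=256M" for a 256 MiB RMA.  Sizes the hardware does not
 * support (see lpcr_rmls() above) are rejected here at boot time.
 */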
static int __init early_parse_rma_size(char *p)
{
	unsigned long kvm_rma_size;

	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	kvm_rma_size = memparse(p, &p);
	/*
	 * Check that the requested size is one supported by the hardware.
	 */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return -EINVAL;
	}
	kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
	return 0;
}
early_param("kvm_rma_size", early_parse_rma_size);
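
/*
 * kvm_alloc_rma - allocate one RMA of the preallocated size from the
 * KVM CMA area.
 *
 * Returns a refcounted kvm_rma_info on success, or NULL if the CMA
 * allocation (kvm_rma_pages pages, naturally aligned) fails.
 */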
struct kvm_rma_info *kvm_alloc_rma(void)
{
	struct page *page;
	struct kvm_rma_info *ri;

	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
	if (!ri)
		return NULL;
	page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
	if (!page)
		goto err_out;
	atomic_set(&ri->use_count, 1);
	ri->base_pfn = page_to_pfn(page);
	return ri;
err_out:
	kfree(ri);
	return NULL;
}
EXPORT_SYMBOL_GPL(kvm_alloc_rma);
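
/*
 * kvm_release_rma - drop a reference to an RMA; the backing CMA pages
 * and the kvm_rma_info are freed when the last user goes away.
 */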
void kvm_release_rma(struct kvm_rma_info *ri)
{
	if (atomic_dec_and_test(&ri->use_count)) {
		cma_release(kvm_cma, pfn_to_page(ri->base_pfn), kvm_rma_pages);
		kfree(ri);
	}
}
EXPORT_SYMBOL_GPL(kvm_release_rma);
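
/*
 * Parse the "kvm_cma_resv_ratio=" kernel command line option, e.g.
 * "kvm_cma_resv_ratio=10" to reserve 10% of memory instead of the
 * default 5%.
 */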
static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
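
/*
 * kvm_alloc_hpt - allocate nr_pages physically contiguous pages for a
 * guest hash page table from the KVM CMA area.  The VM_BUG_ON below
 * enforces that the request covers at least one KVM_CMA_CHUNK_ORDER
 * chunk.
 */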
struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	unsigned long align_pages = HPT_ALIGN_PAGES;

	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	/* Old CPUs require the HPT to be aligned on a multiple of its size */
	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		align_pages = nr_pages;
	return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
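
/* kvm_release_hpt - return a hash page table's pages to the KVM CMA area */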
void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

/**
 * kvm_cma_reserve() - reserve area for kvm hash page table
 *
 * This function reserves memory from the early allocator.  It should be
 * called by arch-specific code once the early allocator (memblock or
 * bootmem) has been activated and all other subsystems have already
 * allocated/reserved memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);
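
	/*
	 * Scale the total page count by the reservation ratio and convert
	 * to bytes, e.g. with 8 GiB of RAM and the default 5% ratio this
	 * selects roughly 410 MiB.
	 */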
	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		/*
		 * Old CPUs require the HPT to be aligned on a multiple of
		 * its size, so for them make the alignment the maximum
		 * size we could request.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			align_size = __rounddown_pow_of_two(selected_size);
		else
			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;

		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}
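
/*
 * kvm_hv_mode_active() is what the platform code consults, e.g. to keep
 * secondary SMT threads offline while HV guests are running.
 */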

extern int hcall_real_table[], hcall_real_table_end[];
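
/*
 * Return 1 if the hypercall 'cmd' has a real-mode handler.  Hypercall
 * numbers are multiples of 4, so cmd / 4 indexes hcall_real_table,
 * which is populated by the real-mode assembly code; a zero entry
 * means there is no real-mode handler for that hcall.
 */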
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);