linux/arch/powerpc/kvm/book3s_hv_builtin.c
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#include "book3s_hv_cma.h"

/*
 * The hash page table alignment on newer CPUs (those with
 * CPU_FTR_ARCH_206) must be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
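/*
 * Worked example: the macro always describes 256KB of alignment,
 * independent of the configured page size:
 *
 *	4KB pages  (PAGE_SHIFT = 12): (1 << 18) >> 12 = 64 pages
 *	64KB pages (PAGE_SHIFT = 16): (1 << 18) >> 16 =  4 pages
 */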
/*
 * By default we reserve 5% of memory for hash page table allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;
/*
 * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
 * Each RMA has to be physically contiguous and of a size that the
 * hardware supports.  PPC970 and POWER7 support 64MB, 128MB and 256MB,
 * and other larger sizes.  Since we are unlikely to be able to allocate
 * that much physically contiguous memory after the system is up and
 * running, we preallocate a set of RMAs in early boot using CMA.
 * The RMA size should be a power of 2.
 */
unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;	/* 128MB */
EXPORT_SYMBOL_GPL(kvm_rma_pages);
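/*
 * For example, with 4KB pages (PAGE_SHIFT = 12) the default works out to
 * (1 << 27) >> 12 = 32768 pages, i.e. 128MB, which is one of the sizes
 * accepted by lpcr_rmls() below.
 */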

/*
 * Work out the RMLS (real mode limit selector) field value for a given
 * RMA size.  Assumes POWER7 or PPC970.
 */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
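/*
 * Sketch (an assumption about typical use, not from this file): a caller
 * would fold the returned selector into its LPCR register image, e.g.
 * using the LPCR_RMLS field definitions from asm/reg.h:
 *
 *	long rmls = lpcr_rmls(rma_size);
 *	if (rmls < 0)
 *		return -EINVAL;
 *	lpcr = (lpcr & ~LPCR_RMLS) | (rmls << LPCR_RMLS_SH);
 */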

static int __init early_parse_rma_size(char *p)
{
	unsigned long kvm_rma_size;

	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	kvm_rma_size = memparse(p, &p);
	/*
	 * Check that the requested size is one supported in hardware.
	 */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return -EINVAL;
	}
	kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
	return 0;
}
early_param("kvm_rma_size", early_parse_rma_size);
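/*
 * Example (an assumption about typical usage): the RMA size can be
 * overridden on the kernel command line with any suffix memparse()
 * understands, e.g.:
 *
 *	kvm_rma_size=256M
 */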

struct kvm_rma_info *kvm_alloc_rma(void)
{
	struct page *page;
	struct kvm_rma_info *ri;

	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
	if (!ri)
		return NULL;
	page = kvm_alloc_cma(kvm_rma_pages, kvm_rma_pages);
	if (!page)
		goto err_out;
	atomic_set(&ri->use_count, 1);
	ri->base_pfn = page_to_pfn(page);
	return ri;
err_out:
	kfree(ri);
	return NULL;
}
EXPORT_SYMBOL_GPL(kvm_alloc_rma);

void kvm_release_rma(struct kvm_rma_info *ri)
{
	if (atomic_dec_and_test(&ri->use_count)) {
		kvm_release_cma(pfn_to_page(ri->base_pfn), kvm_rma_pages);
		kfree(ri);
	}
}
EXPORT_SYMBOL_GPL(kvm_release_rma);
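/*
 * Usage sketch (an assumption, not from this file): a caller pairs the
 * two helpers and derives the RMA's physical base from the returned pfn:
 *
 *	struct kvm_rma_info *ri = kvm_alloc_rma();
 *	if (ri) {
 *		unsigned long rma_base = ri->base_pfn << PAGE_SHIFT;
 *		...
 *		kvm_release_rma(ri);
 *	}
 */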

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
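/*
 * Example (an assumption about typical usage): reserve 10% of memory
 * instead of the default 5%:
 *
 *	kvm_cma_resv_ratio=10
 */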

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	unsigned long align_pages = HPT_ALIGN_PAGES;

	/* Old CPUs require the HPT to be aligned on a multiple of its size */
	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		align_pages = nr_pages;
	return kvm_alloc_cma(nr_pages, align_pages);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	kvm_release_cma(page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);
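/*
 * For example, a 16MB HPT (4096 pages at 4KB each) is requested with
 * 256KB alignment (HPT_ALIGN_PAGES) on POWER7 and later, but with full
 * 16MB alignment (align_pages == nr_pages) on older CPUs.
 */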

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		/*
		 * Old CPUs require the HPT to be aligned on a multiple of
		 * its size, so for them use the largest size we could
		 * request as the alignment.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			align_size = __rounddown_pow_of_two(selected_size);
		else
			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;

		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
		kvm_cma_declare_contiguous(selected_size, align_size);
	}
}
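/*
 * Worked example (assuming the default 5% ratio and 4KB pages): on a
 * 32GB machine the loop counts 8388608 pages, so selected_size becomes
 * (8388608 * 5 / 100) << 12, roughly 1.6GB, which is then aligned as
 * required and handed to kvm_cma_declare_contiguous().
 */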

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}
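/*
 * Usage sketch (an assumption, not from this file): code that must not
 * run while HV guests exist, e.g. a CPU reconfiguration path, can take
 * the same CPU hotplug lock and test the counter:
 *
 *	get_online_cpus();
 *	if (kvm_hv_mode_active()) {
 *		put_online_cpus();
 *		return -EBUSY;
 *	}
 *	...
 *	put_online_cpus();
 */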